code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1)
---|---|---|---|---
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of ``n``, smallest factor first."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
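# Minimal usage sketch (an illustrative demo function, not from the original
# module; assumes `prime_factors` above):
def _demo_prime_factors():
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
    assert prime_factors(97) == [97]  # a prime is its own factorization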
| 88 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message):
    """Count how many times each uppercase letter appears in ``message``."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x):
    """Sort key: the first element of a pair."""
    return x[0]


def get_frequency_order(message):
    """Return the 26 letters ordered from most to least frequent in ``message``."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}
    for freq in freq_to_letter:
        # Break frequency ties using reverse ETAOIN order.
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message):
    """Score 0-12: how many of the 6 most/least common letters match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
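# Usage sketch (illustrative demo, not from the original module; assumes the
# helpers above):
def _demo_freq_match():
    text = "The quick brown fox jumps over the lazy dog"
    print(get_frequency_order(text))       # alphabet reordered by observed frequency
    print(english_freq_match_score(text))  # 0-12; higher means more English-like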
| 88 | 1 |
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only the bare minimum needed to load the checkpoint
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 357 |
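# Hypothetical invocation of the converter above (all paths are placeholders):
#
#     convert_longformer_qa_checkpoint_to_pytorch(
#         "longformer-base-4096",
#         "/path/to/lightning_checkpoint.ckpt",
#         "/path/to/output_dir",
#     )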
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 283 | 0 |
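# Usage sketch for the processor above (the image path is a placeholder; the
# checkpoint name is the public BridgeTower base model):
#
#     from PIL import Image
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     inputs = processor(images=Image.open("example.png"), text="a photo", return_tensors="pt")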
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
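# Illustrative check (not from the original module): `attribute_map` aliases
# `max_position_embeddings` to `context_length`, so both names read the same value.
def _demo_rwkv_config():
    config = RwkvConfig()
    assert config.max_position_embeddings == config.context_length == 1024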
| 290 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 268 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
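# Illustrative note: `_LazyModule` defers the heavy framework imports declared
# above until first attribute access, e.g. (hypothetical usage):
#
#     import transformers.models.xlm_roberta as xlm_roberta  # cheap
#     config_cls = xlm_roberta.XLMRobertaConfig              # triggers the real import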
| 351 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
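# Illustrative check (not from the original module): the conv strides compound
# into the waveform-to-frame downsampling factor.
def _demo_wavlm_config():
    config = WavLMConfig()
    assert config.inputs_to_logits_ratio == 320  # 5 * 2 * 2 * 2 * 2 * 2 * 2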
| 0 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at (row, column) without a conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the first empty cell (marked 0), scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
        print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 0 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 94 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Project Euler 33: product of the denominators of digit-cancelling fractions."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
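# Illustrative check (not from the original module): the four two-digit
# digit-cancelling fractions, in generation order.
def _demo_fraction_list():
    assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]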
if __name__ == "__main__":
print(solution())
| 270 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
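# Non-interactive sanity check (illustrative demo, not from the original module):
def _demo_rec_insertion_sort():
    data = [5, 3, 1, 4, 2]
    rec_insertion_sort(data, len(data))
    assert data == [1, 2, 3, 4, 5]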
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 194 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type') == "owlvit":
            config_dict = config_dict['text_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type') == "owlvit":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.')

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.')

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        config_dict = {}
        config_dict['text_config'] = text_config
        config_dict['vision_config'] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('attention_mask', {0: 'batch', 1: 'sequence'}),
            ] )

    @property
    def outputs(self):
        return OrderedDict(
            [
                ('logits_per_image', {0: 'batch'}),
                ('logits_per_text', {0: 'batch'}),
                ('text_embeds', {0: 'batch'}),
                ('image_embeds', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation(self):
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None):
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self):
        return 14
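# Illustrative check (not from the original module): default sub-configs are
# created when none are passed.
def _demo_owlvit_config():
    config = OwlViTConfig()
    assert config.text_config.vocab_size == 49408
    assert config.vision_config.image_size == 768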
| 194 | 1 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''')

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
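# Illustrative check (not from the original module): the first six Catalan numbers.
def _demo_catalan_numbers():
    assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]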
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
    print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 279 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(
        description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''')
    parser.add_argument(
        '''--dataset_name''', type=str, default='''wikitext''', help='''Name of the training. Explore datasets at: hf.co/datasets.''', )
    parser.add_argument(
        '''--dataset_config''', type=str, default='''wikitext-103-raw-v1''', help='''Configuration name of the dataset.''')
    parser.add_argument(
        '''--tokenizer_name_or_path''', type=str, default='''sayakpaul/unigram-tokenizer-wikitext''', help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''', )
    parser.add_argument(
        '''--shard_size''', type=int, default=1_000, help='''Number of entries to go in a single shard.''', )
    parser.add_argument('''--split''', type=str, default='''train''', choices=['''train''', '''test''', '''validation'''])
    parser.add_argument(
        '''--limit''', default=None, type=int, help='''Limit the number of shards (used for debugging).''', )
    parser.add_argument(
        '''--max_length''', type=int, default=512, help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
        ''' sequence length that is a multiple of 8.''', )
    parser.add_argument(
        '''--output_dir''', default='''tf-tpu''', type=str, help='''Output directory where the TFRecord shards will be saved. If the'''
        ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
        ''' shards will be directly saved to a Google Cloud Storage bucket.''', )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Build a batched map function that tokenizes the `text` column."""
    def fn(examples):
        return tokenizer(examples['''text'''])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize tokenized rows into `tf.train.Example` byte strings."""
    records = []
    for i in range(len(tokenized_data['''input_ids'''])):
        feature = {
            '''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['''input_ids'''][i])),
            '''attention_mask''': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['''attention_mask'''][i])),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f'''Limiting the dataset to {args.limit} entries.''')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['''text'''])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['''input_ids'''])
        filename = os.path.join(split_dir, f'''dataset-{shard_count}-{records_containing}.tfrecord''')
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('''Wrote file {} containing {} records'''.format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f'''split-{args.split}-records-count.txt''', '''w''') as f:
        print(f'''Total {args.split} records: {total_records}''', file=f)
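# Illustrative reader for the shards written above (not part of the original
# script; the path is a placeholder and `max_length` must match the writer).
def peek_tfrecord(path, max_length=512):
    spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    ds = tf.data.TFRecordDataset([path]).map(lambda r: tf.io.parse_single_example(r, spec))
    return next(iter(ds))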
if __name__ == "__main__":
    args = parse_args()
main(args)
| 279 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 361 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 241 | 0 |
import math
def A_ ( ) -> None:
UpperCamelCase : List[Any] = input("Enter message: " )
UpperCamelCase : Optional[Any] = int(input(F"""Enter key [2-{len(_snake_case ) - 1}]: """ ) )
UpperCamelCase : Tuple = input("Encryption/Decryption [e/d]: " )
if mode.lower().startswith("e" ):
UpperCamelCase : int = encrypt_message(_snake_case , _snake_case )
elif mode.lower().startswith("d" ):
UpperCamelCase : List[Any] = decrypt_message(_snake_case , _snake_case )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
UpperCamelCase : Optional[Any] = [""] * key
for col in range(_snake_case ):
UpperCamelCase : List[Any] = col
while pointer < len(_snake_case ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(_snake_case )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
UpperCamelCase : List[str] = math.ceil(len(_snake_case ) / key )
UpperCamelCase : List[Any] = key
UpperCamelCase : Dict = (num_cols * num_rows) - len(_snake_case )
UpperCamelCase : Optional[int] = [""] * num_cols
UpperCamelCase : str = 0
UpperCamelCase : Dict = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCamelCase : Optional[int] = 0
row += 1
return "".join(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 52 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
a = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _snake_case ( _snake_case : Tuple ) -> Dict:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple:
'''simple docstring'''
if args.student_type == "roberta":
_A = False
elif args.student_type == "gpt2":
_A = False
def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple:
'''simple docstring'''
if args.student_type == "roberta":
_A = False
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' )
parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_snake_case , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' )
parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' )
_A = parser.parse_args()
sanity_checks(_snake_case )
# ARGS #
init_gpu_params(_snake_case )
set_seed(_snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
' itUse `--force` if you want to overwrite it' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(_snake_case ) , _snake_case , indent=4 )
git_log(args.dump_path )
_A , _A , _A = MODEL_CLASSES[args.student_type]
_A , _A , _A = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_A = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_A = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_A = tokenizer.all_special_tokens.index(_snake_case )
_A = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
_A = special_tok_ids
_A = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , 'rb' ) as fp:
_A = pickle.load(_snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , 'rb' ) as fp:
_A = pickle.load(_snake_case )
_A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_A = 0.0 # do not predict special tokens
_A = torch.from_numpy(_snake_case )
else:
_A = None
_A = LmSeqsDataset(params=_snake_case , data=_snake_case )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
_A = student_config_class.from_pretrained(args.student_config )
_A = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
_A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case )
else:
_A = student_model_class(_snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('Student loaded.' )
# TEACHER #
_A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_snake_case , _snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_snake_case , _snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_A = Distiller(
params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
| 315 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_A = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
_A = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def __UpperCamelCase ( _A , _A ):
lowerCAmelCase_ = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowerCAmelCase_ = int(re.match(r'''.*layer_(\d*).*''' , _A )[1] )
layer_number -= 3
return f"h.{layer_number}." + key
def __UpperCamelCase ( _A ):
if dtype == torch.bool:
return 1 / 8
lowerCAmelCase_ = re.search(r'''[^\d](\d+)$''' , str(_A ) )
if bit_search is None:
raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
lowerCAmelCase_ = int(bit_search.groups()[0] )
return bit_size // 8
def __UpperCamelCase ( _A , _A , _A , _A , _A ):
# Construct model
if bloom_config_file == "":
lowerCAmelCase_ = BloomConfig()
else:
lowerCAmelCase_ = BloomConfig.from_json_file(_A )
if shard_model:
lowerCAmelCase_ = os.listdir(_A )
lowerCAmelCase_ = sorted(filter(lambda _A : s.startswith('''layer''' ) and "model_00" in s , _A ) )
lowerCAmelCase_ = {'''weight_map''': {}, '''metadata''': {}}
lowerCAmelCase_ = 0
lowerCAmelCase_ = None
lowerCAmelCase_ = BloomConfig()
for j, file in enumerate(_A ):
print('''Processing file: {}'''.format(_A ) )
lowerCAmelCase_ = None
for i in range(_A ):
# load all TP files
lowerCAmelCase_ = file.replace('''model_00''' , f"model_0{i}" )
lowerCAmelCase_ = torch.load(os.path.join(_A , _A ) , map_location='''cpu''' )
# Rename keys in the transformers names
lowerCAmelCase_ = list(temp.keys() )
for key in keys:
lowerCAmelCase_ = temp.pop(_A )
if tensors is None:
lowerCAmelCase_ = temp
else:
for key in tensors.keys():
if any(key.endswith(_A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase_ = torch.cat([tensors[key], temp[key]] , dim=_A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase_ = tensors[key] / pretraining_tp
torch.save(
_A , os.path.join(
_A , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_A ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCAmelCase_ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase_ = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_A ) ).zfill(5 ) )
lowerCAmelCase_ = BloomConfig()
lowerCAmelCase_ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
lowerCAmelCase_ = total_size
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_A , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCAmelCase_ = json.dumps(_A , indent=2 , sort_keys=_A ) + '''\n'''
f.write(_A )
else:
lowerCAmelCase_ = BloomModel(_A )
lowerCAmelCase_ = os.listdir(_A )
lowerCAmelCase_ = sorted(filter(lambda _A : s.startswith('''layer''' ) and "model_00" in s , _A ) )
lowerCAmelCase_ = None
for i, file in enumerate(_A ):
lowerCAmelCase_ = None
for i in range(_A ):
# load all TP files
lowerCAmelCase_ = file.replace('''model_00''' , f"model_0{i}" )
lowerCAmelCase_ = torch.load(os.path.join(_A , _A ) , map_location='''cpu''' )
# Rename keys in the transformers names
lowerCAmelCase_ = list(temp.keys() )
for key in keys:
lowerCAmelCase_ = temp.pop(_A )
if tensors is None:
lowerCAmelCase_ = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase_ = torch.cat([tensors[key], temp[key]] , dim=_A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase_ = tensors[key] / pretraining_tp
lowerCAmelCase_ = model.load_state_dict(_A , strict=_A )
assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
lowerCAmelCase_ = set(other_keys.missing_keys )
else:
lowerCAmelCase_ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(_A , exist_ok=_A )
lowerCAmelCase_ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCAmelCase_ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
lowerCAmelCase_ = model.to(config.torch_dtype )
torch.save(model.state_dict() , _A )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
_A = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 356 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class A ( __UpperCAmelCase ):
__snake_case = ['pixel_values']
def __init__( self, UpperCamelCase__ = True, UpperCamelCase__ = 32, UpperCamelCase__=PILImageResampling.BILINEAR, UpperCamelCase__ = True, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = size_divisor
lowerCAmelCase_ = resample
super().__init__(**UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(UpperCamelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCAmelCase_ = height // size_divisor * size_divisor
lowerCAmelCase_ = width // size_divisor * size_divisor
lowerCAmelCase_ = resize(UpperCamelCase__, (new_h, new_w), resample=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ )
return image
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ):
"""simple docstring"""
return rescale(image=UpperCamelCase__, scale=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=None, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = ChannelDimension.FIRST, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = size_divisor if size_divisor is not None else self.size_divisor
lowerCAmelCase_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
lowerCAmelCase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(UpperCamelCase__ ) for img in images]
if do_resize:
lowerCAmelCase_ = [self.resize(UpperCamelCase__, size_divisor=UpperCamelCase__, resample=UpperCamelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(UpperCamelCase__, scale=1 / 255 ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(UpperCamelCase__, UpperCamelCase__ ) for image in images]
lowerCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__, tensor_type=UpperCamelCase__ )
| 167 | 0 |
'''simple docstring'''
_lowerCamelCase : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __lowerCamelCase ( A__ ) -> bytes:
"""simple docstring"""
# Make sure the supplied data is a bytes-like object
if not isinstance(A__ , A__ ):
UpperCamelCase = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(A__ )
UpperCamelCase = ''.join(bin(A__ )[2:].zfill(8 ) for byte in data )
UpperCamelCase = len(A__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCamelCase = B'=' * ((6 - len(A__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A__ ) % 6)
else:
UpperCamelCase = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A__ ) , 6 ) ).encode()
+ padding
)
def __lowerCamelCase ( A__ ) -> bytes:
"""simple docstring"""
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(A__ , A__ ) and not isinstance(A__ , A__ ):
UpperCamelCase = (
'argument should be a bytes-like object or ASCII string, '
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(A__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A__ , A__ ):
try:
UpperCamelCase = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
UpperCamelCase = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCamelCase = encoded_data[:-padding]
UpperCamelCase = ''.join(
bin(B64_CHARSET.index(A__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCamelCase = ''.join(
bin(B64_CHARSET.index(A__ ) )[2:].zfill(6 ) for char in encoded_data )
UpperCamelCase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A__ ) , 8 )
]
return bytes(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_UpperCamelCase: List[Any] = logging.getLogger(__name__)
@dataclass
class a__ :
_lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Whether tp freeze the encoder.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class a__ :
_lowerCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
_lowerCamelCase = field(
default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
_lowerCamelCase = field(
default=1_024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
_lowerCamelCase = field(
default=128, metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
_lowerCamelCase = field(
default=142, metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
}, )
_lowerCamelCase = field(
default=142, metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
_lowerCamelCase = field(default=-1, metadata={'help': '# training examples. -1 means use all.'} )
_lowerCamelCase = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'} )
_lowerCamelCase = field(default=-1, metadata={'help': '# test examples. -1 means use all.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Source language id for translation.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Target language id for translation.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': '# num_beams to use for evaluation.'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
logger.info(f'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(f''' {key} = {metrics[key]}''' )
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , f'''{split}_results.json''' ) )
def lowercase__ ( ) -> Optional[int]:
'''simple docstring'''
lowercase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : int = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
assert hasattr(_UpperCAmelCase , _UpperCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
lowercase : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_UpperCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase : Optional[int] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : Optional[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_UpperCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase : Dict = SeqaSeqDataset
# Get datasets
lowercase : int = (
dataset_class(
_UpperCAmelCase , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
lowercase : str = (
dataset_class(
_UpperCAmelCase , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase : Optional[Any] = (
dataset_class(
_UpperCAmelCase , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase : List[Any] = (
build_compute_metrics_fn(data_args.task , _UpperCAmelCase ) if training_args.predict_with_generate else None
)
lowercase : List[Any] = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , data_collator=SeqaSeqDataCollator(
_UpperCAmelCase , _UpperCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
lowercase : List[Any] = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
lowercase : Union[str, Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase : List[str] = train_result.metrics
lowercase : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase : Tuple = trainer.evaluate(metric_key_prefix='val' )
lowercase : Dict = data_args.n_val
lowercase : Tuple = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
lowercase : List[Any] = trainer.predict(test_dataset=_UpperCAmelCase , metric_key_prefix='test' )
lowercase : str = test_output.metrics
lowercase : Dict = data_args.n_test
if trainer.is_world_process_zero():
lowercase : Tuple = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
if training_args.predict_with_generate:
lowercase : str = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
lowercase : Tuple = lmap(str.strip , _UpperCAmelCase )
write_txt_file(_UpperCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_UpperCAmelCase , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 255 | 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE_ = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE_ = object()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(__snake_case ) - len(__snake_case ) + 1 ):
SCREAMING_SNAKE_CASE = [x.match(__snake_case ) for x, y in zip(__snake_case , ks[i:] )]
if matches and all(__snake_case ):
return True
return False
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
def replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for rule, replacement in rules:
if _match(__snake_case , __snake_case ):
return replacement
return val
return replace
def __lowercase ( ) -> str:
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , __snake_case )),
(("transformer", "wte", "embedding"), P("""mp""" , __snake_case )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__snake_case , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , __snake_case )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__snake_case , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , __snake_case )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = _get_partition_rules()
SCREAMING_SNAKE_CASE = _replacement_rules(__snake_case )
SCREAMING_SNAKE_CASE = {k: _unmatched for k in flatten_dict(__snake_case )}
SCREAMING_SNAKE_CASE = {k: replace(__snake_case , __snake_case ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__snake_case ) )
| 362 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Any = BertTokenizer
__snake_case : Dict = BertTokenizerFast
__snake_case : Tuple = True
__snake_case : List[Any] = True
__snake_case : Optional[Any] = filter_non_english
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase__ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
# With lower casing
SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""hรคllo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""HรคLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHรคLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer()
SCREAMING_SNAKE_CASE = """a\n'll !!to?'d of, can't."""
SCREAMING_SNAKE_CASE = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(lowerCamelCase__ ) ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = i
SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=lowerCamelCase__ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase__ ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase__ ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ,lowerCamelCase__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = F"""A, naรฏve {tokenizer_r.mask_token} AllenNLP sentence."""
SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(lowerCamelCase__ ,"""do_lower_case""" ) else False
SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##รฏ"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["""็""", """ไบบ""", """ๆ"""]
SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCamelCase__ )
]
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
| 193 | 0 |
"""simple docstring"""
import math
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(_lowercase )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
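
# Quick self-check (illustrative values, not part of the original script):
# res(2, 100) = 100 * log10(2) ~ 30.103 while res(10, 25) = 25, so
# 2**100 > 10**25 can be decided without materialising either integer.
assert res(2, 100) > res(10, 25)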
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__a = "Enter the base and the power separated by a comma: "
__a , __a = map(int, input(prompt).split(","))
__a , __a = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
__a = res(xa, ya)
__a = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 66 |
"""simple docstring"""
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
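if __name__ == "__main__":
    # Illustrative demo (not part of the original module): quick_select recovers
    # the element at sorted position k without fully sorting the list.
    sample = [2, 4, 5, 7, 899, 54, 32]
    assert quick_select(sample, len(sample) // 2) == sorted(sample)[len(sample) // 2]  # 7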
| 106 | 0 |
def sum_digits(num: int) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
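# Worked check (illustrative): for max_n = 10 the recurrence yields the
# numerators 2, 3, 8, 11, 19, 87, 106, 193, 1264, 1457, so solution(10)
# returns sum_digits(1457) = 1 + 4 + 5 + 7 = 17, matching the 10th-convergent
# example from Project Euler problem 65.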
if __name__ == "__main__":
print(F'''{solution() = }''')
| 370 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_lowerCamelCase : List[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
_lowerCamelCase : List[str] = {
"allenai/led-base-16384": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
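# Illustrative note (not in the original): printable bytes map to themselves,
# while the remaining bytes are shifted above 255. For example the space byte
# 0x20 becomes chr(256 + 32) == "Ġ", which is why GPT-2-style BPE vocabularies
# show "Ġ" where a token begins with a space.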
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __snake_case (PreTrainedTokenizer ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any]="replace" , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : List[str]="</s>" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Any="<pad>" , _UpperCAmelCase : Tuple="<mask>" , _UpperCAmelCase : int=False , **_UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Any = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
_lowerCAmelCase : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
_lowerCAmelCase : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
_lowerCAmelCase : Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
_lowerCAmelCase : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
_lowerCAmelCase : Any = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase : Dict = json.load(_UpperCAmelCase )
_lowerCAmelCase : Any = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Optional[Any] = errors # how to handle errors in decoding
_lowerCAmelCase : Dict = bytes_to_unicode()
_lowerCAmelCase : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase : Tuple = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase : Union[str, Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Any = tuple(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
_lowerCAmelCase : Optional[int] = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : int = 0
while i < len(_UpperCAmelCase ):
try:
_lowerCAmelCase : Optional[int] = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : List[str] = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Tuple = tuple(_UpperCAmelCase )
_lowerCAmelCase : Dict = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
_lowerCAmelCase : Union[str, Any] = get_pairs(_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = """ """.join(_UpperCAmelCase )
_lowerCAmelCase : Any = word
return word
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for token in re.findall(self.pat , _UpperCAmelCase ):
_lowerCAmelCase : Union[str, Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(""" """ ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = """""".join(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Optional[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + """\n""" )
_lowerCAmelCase : Dict = 0
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase : str = token_index
writer.write(""" """.join(_UpperCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Tuple = [self.cls_token_id]
_lowerCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
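    # Illustrative note: like BART, LED formats a single sequence as
    # `<s> A </s>` and a pair of sequences as `<s> A </s></s> B </s>`.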
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=False , **_UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
_lowerCAmelCase : Optional[Any] = """ """ + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE ( self : int , _UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
_lowerCAmelCase : List[Any] = super()._pad(
encoded_inputs=_UpperCAmelCase , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
_lowerCAmelCase : List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCAmelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCAmelCase : str = len(encoded_inputs["""global_attention_mask"""] ) != len(_UpperCAmelCase )
if needs_to_be_padded:
_lowerCAmelCase : Optional[Any] = len(_UpperCAmelCase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCAmelCase : Union[str, Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCAmelCase : List[Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
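    # Illustrative example: with padding_side == "right" and input_ids padded
    # from length 3 to 5, a global attention mask of [1, 0, 0] becomes
    # [1, 0, 0, -1, -1]; here 1 marks global attention, 0 local attention,
    # and -1 pure padding.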
| 159 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : int = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class lowercase__ ( PretrainedConfig):
UpperCamelCase_ = """bridgetower_vision_model"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1E-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_factor
SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = stop_gradient
SCREAMING_SNAKE_CASE : Dict = share_layernorm
SCREAMING_SNAKE_CASE : Union[str, Any] = remove_last_layer
@classmethod
def __A ( cls : List[str] , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
if config_dict.get('''model_type''' ) == "bridgetower":
SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class lowercase__ ( PretrainedConfig):
UpperCamelCase_ = """bridgetower_text_model"""
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE : str = pad_token_id
SCREAMING_SNAKE_CASE : Optional[int] = bos_token_id
SCREAMING_SNAKE_CASE : List[str] = eos_token_id
@classmethod
def __A ( cls : Dict , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
if config_dict.get('''model_type''' ) == "bridgetower":
SCREAMING_SNAKE_CASE : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class lowercase__ ( PretrainedConfig):
UpperCamelCase_ = """bridgetower"""
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1E-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        '''simple docstring'''
        kwargs.pop('''text_config_dict''' , None )
        kwargs.pop('''vision_config_dict''' , None )
        super().__init__(**kwargs )
SCREAMING_SNAKE_CASE : Union[str, Any] = share_cross_modal_transformer_layers
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_factor
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = share_link_tower_layers
SCREAMING_SNAKE_CASE : int = link_tower_type
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = tie_word_embeddings
SCREAMING_SNAKE_CASE : str = init_layernorm_from_vision_encoder
if text_config is None:
SCREAMING_SNAKE_CASE : Dict = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
SCREAMING_SNAKE_CASE : Any = BridgeTowerTextConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = BridgeTowerVisionConfig(**UpperCamelCase__ )
@classmethod
def __A ( cls : Optional[int] , UpperCamelCase__ : BridgeTowerTextConfig , UpperCamelCase__ : BridgeTowerVisionConfig , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Dict = self.text_config.to_dict()
SCREAMING_SNAKE_CASE : List[str] = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type
return output
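# Usage sketch (illustrative; the three obfuscated classes above correspond to
# the upstream BridgeTowerVisionConfig, BridgeTowerTextConfig and
# BridgeTowerConfig, whose classmethod composes a full config):
# cfg = BridgeTowerConfig.from_text_vision_configs(BridgeTowerTextConfig(), BridgeTowerVisionConfig())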
| 182 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, '''IN_MEMORY_MAX_SIZE''', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
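# Illustrative reading of the parametrization above: a 400 MiB dataset is
# "small" under a 900 MiB IN_MEMORY_MAX_SIZE cap but not under a 100 MiB cap,
# and a cap of 0 (the library default) makes is_small_dataset return False
# for any size.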
| 182 | 1 |
'''simple docstring'''
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    octets = [int(i) for i in ip_va_address.split('.') if i.isdigit()]
    # valid IPv4 octets span 0-255; the original bound of 254 wrongly rejected e.g. x.y.z.255
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
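# Illustrative checks (not part of the original module):
assert is_ip_va_address_valid("192.168.0.23")
assert not is_ip_va_address_valid("192.256.15.8")    # octet out of range
assert not is_ip_va_address_valid("172.100.0.8.50")  # too many octets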
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
    print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 358 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE__ ( PipelineTool):
lowerCAmelCase_ = """facebook/bart-large-mnli"""
lowerCAmelCase_ = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
lowerCAmelCase_ = """text_classifier"""
lowerCAmelCase_ = AutoTokenizer
lowerCAmelCase_ = AutoModelForSequenceClassification
lowerCAmelCase_ = ["""text""", ["""text"""]]
lowerCAmelCase_ = ["""text"""]
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
super().setup()
UpperCamelCase = self.model.config
UpperCamelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
UpperCamelCase = int(A_ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCAmelCase_ ( self , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = labels
return self.pre_processor(
[text] * len(A_ ) , [F'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = outputs.logits
UpperCamelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
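# Usage sketch (illustrative; assumes the PipelineTool call protocol from
# `.base` and the upstream name TextClassificationTool for the class above):
# classifier = TextClassificationTool()
# classifier("This movie was fantastic!", labels=["positive", "negative"])  # e.g. -> "positive"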
| 251 | 0 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __A (ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
    def __init__(self , * ,
        clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 768 , time_embed_dim: int , cross_attention_dim , ) -> Any:
        """simple docstring"""
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )

    def forward(self , * , image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ) -> Optional[int]:
        """simple docstring"""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
| 211 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
'''simple docstring'''
UpperCamelCase__ : int = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , _A , _A , _A = None , _A = 50_257 , _A = 1_024 , _A = 768 , _A = 12 , _A = 12 , _A = None , _A = "gelu_new" , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 1e-5 , _A = 0.0_2 , _A = True , _A = True , _A = False , _A = False , ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
__SCREAMING_SNAKE_CASE = prefix_inner_dim
__SCREAMING_SNAKE_CASE = prefix_hidden_dim
__SCREAMING_SNAKE_CASE = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__SCREAMING_SNAKE_CASE = (
nn.Linear(self.prefix_hidden_dim , _A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__SCREAMING_SNAKE_CASE = GPTaConfig(
vocab_size=_A , n_positions=_A , n_embd=_A , n_layer=_A , n_head=_A , n_inner=_A , activation_function=_A , resid_pdrop=_A , embd_pdrop=_A , attn_pdrop=_A , layer_norm_epsilon=_A , initializer_range=_A , scale_attn_weights=_A , use_cache=_A , scale_attn_by_inverse_layer_idx=_A , reorder_and_upcast_attn=_A , )
__SCREAMING_SNAKE_CASE = GPTaLMHeadModel(_A )
def _A ( self , _A , _A , _A = None , _A = None , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(_A )
__SCREAMING_SNAKE_CASE = self.encode_prefix(_A )
__SCREAMING_SNAKE_CASE = self.decode_prefix(_A )
__SCREAMING_SNAKE_CASE = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__SCREAMING_SNAKE_CASE = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__SCREAMING_SNAKE_CASE = torch.cat((dummy_token, input_ids) , dim=1 )
__SCREAMING_SNAKE_CASE = self.transformer(inputs_embeds=_A , labels=_A , attention_mask=_A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _A ( self , _A , _A ):
'''simple docstring'''
return torch.zeros(_A , self.prefix_length , dtype=torch.intaa , device=_A )
def _A ( self , _A ):
'''simple docstring'''
return self.encode_prefix(_A )
@torch.no_grad()
def _A ( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.split(_A , 1 , dim=0 )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for feature in features:
__SCREAMING_SNAKE_CASE = self.decode_prefix(feature.to(_A ) ) # back to the clip feature
# Only support beam search for now
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.generate_beam(
input_embeds=_A , device=_A , eos_token_id=_A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__SCREAMING_SNAKE_CASE = torch.stack(_A )
__SCREAMING_SNAKE_CASE = torch.stack(_A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _A ( self , _A=None , _A=None , _A=None , _A = 5 , _A = 67 , _A = 1.0 , _A = None , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = torch.ones(_A , device=_A , dtype=torch.int )
__SCREAMING_SNAKE_CASE = torch.zeros(_A , device=_A , dtype=torch.bool )
if input_embeds is not None:
__SCREAMING_SNAKE_CASE = input_embeds
else:
__SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(_A )
for i in range(_A ):
__SCREAMING_SNAKE_CASE = self.transformer(inputs_embeds=_A )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__SCREAMING_SNAKE_CASE = logits.softmax(-1 ).log()
if scores is None:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = logits.topk(_A , -1 )
__SCREAMING_SNAKE_CASE = generated.expand(_A , *generated.shape[1:] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__SCREAMING_SNAKE_CASE = next_tokens
else:
__SCREAMING_SNAKE_CASE = tokens.expand(_A , *tokens.shape[1:] )
__SCREAMING_SNAKE_CASE = torch.cat((tokens, next_tokens) , dim=1 )
else:
__SCREAMING_SNAKE_CASE = -float(np.inf )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__SCREAMING_SNAKE_CASE = scores_sum / seq_lengths[:, None]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = scores_sum_average.view(-1 ).topk(_A , -1 )
__SCREAMING_SNAKE_CASE = next_tokens // scores_sum.shape[1]
__SCREAMING_SNAKE_CASE = seq_lengths[next_tokens_source]
__SCREAMING_SNAKE_CASE = next_tokens % scores_sum.shape[1]
__SCREAMING_SNAKE_CASE = next_tokens.unsqueeze(1 )
__SCREAMING_SNAKE_CASE = tokens[next_tokens_source]
__SCREAMING_SNAKE_CASE = torch.cat((tokens, next_tokens) , dim=1 )
__SCREAMING_SNAKE_CASE = generated[next_tokens_source]
__SCREAMING_SNAKE_CASE = scores_sum_average * seq_lengths
__SCREAMING_SNAKE_CASE = is_stopped[next_tokens_source]
__SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__SCREAMING_SNAKE_CASE = torch.cat((generated, next_token_embed) , dim=1 )
__SCREAMING_SNAKE_CASE = is_stopped + next_tokens.eq(_A ).squeeze()
if is_stopped.all():
break
__SCREAMING_SNAKE_CASE = scores / seq_lengths
__SCREAMING_SNAKE_CASE = scores.argsort(descending=_A )
# tokens tensors are already padded to max_seq_length
__SCREAMING_SNAKE_CASE = [tokens[i] for i in order]
__SCREAMING_SNAKE_CASE = torch.stack(_A , dim=0 )
__SCREAMING_SNAKE_CASE = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
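# Illustrative note: generate_beam keeps `beam_size` candidate sequences and
# re-ranks them every step by average per-token log-probability
# (scores_sum / seq_lengths), stopping a beam once it emits `eos_token_id`;
# the final outputs are sorted by that same length-normalized score.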
| 358 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = LayoutLMTokenizer
UpperCamelCase__ : Any = LayoutLMTokenizerFast
UpperCamelCase__ : Optional[int] = True
UpperCamelCase__ : int = True
def _A ( self ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _A ( self , **_A ):
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_A )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'UNwant\u00E9d,running'
__SCREAMING_SNAKE_CASE = 'unwanted, running'
return input_text, output_text
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [7, 4, 5, 10, 8, 9] )
def _A ( self ):
'''simple docstring'''
pass
| 118 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowercase__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline
a : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
a : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
torch.manual_seed(0 )
UpperCamelCase__ : int = ControlNetModel(
block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=__magic_name__, set_alpha_to_one=__magic_name__, )
torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
UpperCamelCase__ : Any = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
UpperCamelCase__ : Any = CLIPTextModel(__magic_name__ )
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ : str = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=0 ) -> Tuple:
"""simple docstring"""
if str(__magic_name__ ).startswith('''mps''' ):
UpperCamelCase__ : int = torch.manual_seed(__magic_name__ )
else:
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCamelCase__ : Any = 2
UpperCamelCase__ : Optional[int] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=__magic_name__, device=torch.device(__magic_name__ ), )
UpperCamelCase__ : Tuple = floats_tensor(control_image.shape, rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
UpperCamelCase__ : List[str] = image.cpu().permute(0, 2, 3, 1 )[0]
UpperCamelCase__ : List[str] = Image.fromarray(np.uinta(__magic_name__ ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : Tuple = StableDiffusionControlNetImgaImgPipeline
a : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a : Dict = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m, torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
UpperCamelCase__ : Dict = ControlNetModel(
block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = ControlNetModel(
block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
UpperCamelCase__ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=__magic_name__, set_alpha_to_one=__magic_name__, )
torch.manual_seed(0 )
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
UpperCamelCase__ : str = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
UpperCamelCase__ : Dict = CLIPTextModel(__magic_name__ )
UpperCamelCase__ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ : Tuple = MultiControlNetModel([controlneta, controlneta] )
UpperCamelCase__ : List[str] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=0 ) -> str:
"""simple docstring"""
if str(__magic_name__ ).startswith('''mps''' ):
UpperCamelCase__ : str = torch.manual_seed(__magic_name__ )
else:
UpperCamelCase__ : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCamelCase__ : Optional[int] = 2
UpperCamelCase__ : List[str] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=__magic_name__, device=torch.device(__magic_name__ ), ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=__magic_name__, device=torch.device(__magic_name__ ), ),
]
UpperCamelCase__ : Dict = floats_tensor(control_image[0].shape, rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
UpperCamelCase__ : Optional[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
UpperCamelCase__ : List[str] = Image.fromarray(np.uinta(__magic_name__ ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : int = self.get_dummy_components()
UpperCamelCase__ : Union[str, Any] = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
UpperCamelCase__ : Tuple = 10.0
UpperCamelCase__ : Optional[Any] = 4
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : Optional[Any] = steps
UpperCamelCase__ : Dict = scale
UpperCamelCase__ : str = pipe(**__magic_name__ )[0]
UpperCamelCase__ : Tuple = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : Any = steps
UpperCamelCase__ : Optional[Any] = scale
UpperCamelCase__ : List[str] = pipe(**__magic_name__, control_guidance_start=0.1, control_guidance_end=0.2 )[0]
UpperCamelCase__ : List[str] = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : int = steps
UpperCamelCase__ : Tuple = scale
UpperCamelCase__ : str = pipe(**__magic_name__, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7] )[0]
UpperCamelCase__ : Dict = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : Optional[int] = steps
UpperCamelCase__ : Tuple = scale
UpperCamelCase__ : List[str] = pipe(**__magic_name__, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.get_dummy_components()
UpperCamelCase__ : str = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__magic_name__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : List[str] = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
UpperCamelCase__ : Any = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', safety_checker=__magic_name__, controlnet=__magic_name__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase__ : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ : str = '''evil space-punk bird'''
UpperCamelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
UpperCamelCase__ : int = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
UpperCamelCase__ : Optional[int] = pipe(
__magic_name__, __magic_name__, control_image=__magic_name__, generator=__magic_name__, output_type='''np''', num_inference_steps=50, strength=0.6, )
UpperCamelCase__ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
UpperCamelCase__ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
| 201 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a_ : str = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
a_ : int = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
a_ : Tuple = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=False ):
"""simple docstring"""
if rouge_types is None:
lowerCamelCase_ = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
lowerCamelCase_ = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase , use_stemmer=UpperCamelCase )
if use_aggregator:
lowerCamelCase_ = scoring.BootstrapAggregator()
else:
lowerCamelCase_ = []
for ref, pred in zip(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = scorer.score(UpperCamelCase , UpperCamelCase )
if use_aggregator:
aggregator.add_scores(UpperCamelCase )
else:
scores.append(UpperCamelCase )
if use_aggregator:
lowerCamelCase_ = aggregator.aggregate()
else:
lowerCamelCase_ = {}
for key in scores[0]:
lowerCamelCase_ = [score[key] for score in scores]
return result
| 55 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 361 |
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
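# Example invocation (added; the script filename is an assumption and the path is a
# placeholder for a local diffusers checkpoint):
#   python change_naming_configs_and_checkpoints.py \
#       --repo_path ./ldm-celebahq-256 --dump_path ./ldm-celebahq-256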
| 216 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(_A )
# Let's go
lowercase = parser.parse_args()
if not hasattr(_A , 'func' ):
parser.print_help()
exit(1 )
# Run
lowercase = args.func(_A )
service.run()
if __name__ == "__main__":
main()
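# Once installed, setuptools exposes this entry point as the `diffusers-cli` console
# script, so the registered environment command is typically run as (hedged example):
#   diffusers-cli env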
| 220 |
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"{solution() = }")
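# Worked mini-example of the same windowed product (added commentary): for the digit
# string "9989" and a window of 3, reduce multiplies each window's digits —
# "998" -> 648 and "989" -> 648 — so the maximum 3-digit product is 648. solution()
# does the same over N with 13-digit windows.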
| 314 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = PegasusTokenizer
__UpperCamelCase = PegasusTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def A__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase = PegasusTokenizer(lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ (self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return ("This is a test", "This is a test")
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """</s>"""
_lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(lowerCamelCase ) , 1_103 )
def A__ (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_lowerCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
_lowerCAmelCase = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_lowerCAmelCase = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_lowerCAmelCase = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
_lowerCAmelCase = tokenizer([raw_input_str] , return_tensors=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
_lowerCAmelCase = """To ensure a smooth flow of bank resolutions."""
_lowerCAmelCase = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
_lowerCAmelCase = tokenizer([raw_input_str] , return_tensors=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ["""This is going to be way too long.""" * 150, """short example"""]
_lowerCAmelCase = ["""not super long but more than 5 tokens""", """tiny"""]
_lowerCAmelCase = self._large_tokenizer(lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
_lowerCAmelCase = self._large_tokenizer(
text_target=lowerCamelCase , max_length=5 , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase ) == 2 # input_ids, attention_mask.
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = {"""input_ids""": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
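# Added note (commentary only, not original test code): the assertions above pin down
# the Pegasus id layout — ids 0..3 decode to <pad>, </s>, <mask_1>, <mask_2>, and the
# underlying SentencePiece ids are shifted by `offset` (103 for google/pegasus-large),
# which is why unk_token_id == offset + 2 == 105 and vocab_size == 96_103.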
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = PegasusTokenizer
__UpperCamelCase = PegasusTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def A__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase = PegasusTokenizer(lowerCamelCase , offset=0 , mask_token_sent=lowerCamelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ (self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return ("This is a test", "This is a test")
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_lowerCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
_lowerCAmelCase = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@require_torch
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ["""This is going to be way too long.""" * 1_000, """short example"""]
_lowerCAmelCase = ["""not super long but more than 5 tokens""", """tiny"""]
_lowerCAmelCase = self._large_tokenizer(lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
_lowerCAmelCase = self._large_tokenizer(
text_target=lowerCamelCase , max_length=5 , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase ) == 2 # input_ids, attention_mask.
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_lowerCAmelCase = self._large_tokenizer(lowerCamelCase ).input_ids
self.assertListEqual(
lowerCamelCase , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
| 367 |
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = None
_lowerCAmelCase = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase = queue.Queue()
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
_lowerCAmelCase = F"""Enter the left node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = left_node
q.put(snake_case_ )
_lowerCAmelCase = F"""Enter the right node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = right_node
q.put(snake_case_ )
raise
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = []
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case_ )
_lowerCAmelCase = n.left
# end of while means current node doesn't have left child
_lowerCAmelCase = stack.pop()
# start to traverse its right child
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n:
stack.append(snake_case_ )
_lowerCAmelCase = n.left
_lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCAmelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
node: TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 317 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 169 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 225 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__A : List[str] = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=lowercase__ , exist_ok=lowercase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , use_external_data_format=lowercase__ , enable_onnx_checker=lowercase__ , opset_version=lowercase__ , )
else:
export(
lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , opset_version=lowercase__ , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ):
"""simple docstring"""
A = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
A = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
A = "cpu"
A = Path(lowercase__ )
# VAE DECODER
A = AutoencoderKL.from_pretrained(model_path + "/vae" )
A = vae_decoder.config.latent_channels
# forward only through the decoder part
A = vae_decoder.decode
onnx_export(
lowercase__ , model_args=(
torch.randn(1 , lowercase__ , 25 , 25 ).to(device=lowercase__ , dtype=lowercase__ ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=lowercase__ , )
del vae_decoder
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
args = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
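# Example invocation (added; the script filename and paths are placeholders):
#   python convert_vae_decoder_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14
# Pass --fp16 only on a CUDA machine; the fp16 branch above raises otherwise.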
| 370 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 57 | 0 |
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
if (
not isinstance(_lowerCamelCase ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
if (
not isinstance(_lowerCamelCase ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
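# Worked example (added commentary): for apparent_power=100 VA and power_factor=0.8,
#   real_power(100, 0.8)     -> 80.0  (100 * 0.8)
#   reactive_power(100, 0.8) -> ~60.0 (100 * sqrt(1 - 0.8**2) = 100 * 0.6, up to float rounding)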
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
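# Example invocation (added; the model and dataset ids are illustrative):
#   python eval.py --model_id hf-test/xls-r-300m-sv \
#       --dataset mozilla-foundation/common_voice_7_0 --config sv-SE --split test --log_outputs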
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with ๐ค Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with ๐ค Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
args = parser.parse_args()
main(args)
| 212 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase ) -> bool:
_lowerCAmelCase =str(__UpperCamelCase )
return n == n[::-1]
def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str:
_lowerCAmelCase =0
for i in range(1 , __UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
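# Added check (commentary): 585 = 0b1001001001 reads the same forwards and backwards in
# both bases, so it is counted; summing all such numbers below 1000 gives
# solution(1000) == 1772.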
| 352 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''CLIPImageProcessor'''
lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
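# Hedged usage sketch (added; assumes this file corresponds to transformers' AltCLIP
# processor, and the checkpoint id is only an example):
#   from transformers import AltCLIPProcessor
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")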
| 341 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase : Optional[Any] = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : Dict = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : Any = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
_lowerCamelCase : Dict = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
_lowerCamelCase : Optional[int] = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
_lowerCamelCase : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Tuple = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : List[str] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCamelCase : str = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCamelCase : Optional[int] = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ :
'''simple docstring'''
def __call__( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : List[str] , ) ->BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
elif titles is None or texts is None:
A__ = titles if texts is None else texts
return super().__call__(
UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles]
A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts]
A__ = len(UpperCAmelCase__)
A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages
if len(UpperCAmelCase__) != len(UpperCAmelCase__):
raise ValueError(
f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts.""")
A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__)
]
}
if return_attention_mask is not False:
A__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
A__ = attention_mask
return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = reader_input['''input_ids''']
A__ , A__ , A__ = reader_output[:3]
A__ = len(UpperCAmelCase__)
A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__)
A__ = []
for doc_id in sorted_docs:
A__ = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A__ = sequence_ids.index(self.pad_token_id)
else:
A__ = len(UpperCAmelCase__)
A__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(UpperCAmelCase__) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = []
for start_index, start_score in enumerate(UpperCAmelCase__):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__)
A__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""")
A__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""")
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(UpperCAmelCase__) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
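# Hedged usage sketch (added; the class names in this file are scrambled placeholders —
# the corresponding upstream transformers class is DPRReaderTokenizer):
#   from transformers import DPRReaderTokenizer
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )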
| 14 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ ( datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__ = None
UpperCAmelCase__ = "utf-8"
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = True # deprecated
UpperCAmelCase__ = None # deprecated
UpperCAmelCase__ = 10 << 20 # 10MB
UpperCAmelCase__ = None
class UpperCamelCase_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__ = JsonConfig
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
A__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
A__ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(UpperCAmelCase__ , (str, list, tuple)):
A__ = data_files
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
A__ = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files}))
return splits
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type
A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema)
return pa_table
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
# We keep only the field we are interested in
A__ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase__ , (list, tuple)):
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
else:
A__ = dataset
A__ = pa.Table.from_pydict(UpperCAmelCase__)
yield file_idx, self._cast_table(UpperCAmelCase__)
# If the file has one json object per line
else:
with open(UpperCAmelCase__ , '''rb''') as f:
A__ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ = max(self.config.chunksize // 32 , 16 << 10)
A__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A__ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase__)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''')
try:
while True:
try:
A__ = paj.read_json(
io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase__ , pa.ArrowInvalid)
and "straddling" not in str(UpperCAmelCase__)
or block_size > len(UpperCAmelCase__)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON
try:
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
A__ = pa.Table.from_pydict(UpperCAmelCase__)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(UpperCAmelCase__)
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__)
batch_idx += 1
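# Hedged usage sketch (added; not part of the builder). This builder backs the packaged
# "json" loader in `datasets`, where the `field` kwarg selects a nested list of records:
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.json", field="data")  # path is illustrative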
| 14 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = 42
a = 42
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
a = 42
a = (16, 32, 96, 2_56)
a = jnp.floataa
def lowercase_ ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE__ = []
for i in range(len(self.block_out_channels ) - 1 ):
SCREAMING_SNAKE_CASE__ = self.block_out_channels[i]
SCREAMING_SNAKE_CASE__ = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE__ = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = blocks
SCREAMING_SNAKE_CASE__ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Any , __lowerCamelCase : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.conv_in(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.silu(__lowerCamelCase )
for block in self.blocks:
SCREAMING_SNAKE_CASE__ = block(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.silu(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.conv_out(__lowerCamelCase )
return embedding
@flax_register_to_config
class UpperCAmelCase__ ( nn.Module , A__ , A__ ):
"""simple docstring"""
a = 32
a = 4
a = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a = False
a = (3_20, 6_40, 12_80, 12_80)
a = 2
a = 8
a = None
a = 12_80
a = 0.0
a = False
a = jnp.floataa
a = True
a = 0
a = "rgb"
a = (16, 32, 96, 2_56)
def lowercase_ ( self : Any , __lowerCamelCase : jax.random.KeyArray ) -> FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE__ = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE__ = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE__ = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = jax.random.split(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["params"]
def lowercase_ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.block_out_channels
SCREAMING_SNAKE_CASE__ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE__ = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE__ = FlaxTimestepEmbedding(__lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE__ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE__ = self.only_cross_attention
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = block_out_channels[0]
SCREAMING_SNAKE_CASE__ = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE__ = output_channel
SCREAMING_SNAKE_CASE__ = block_out_channels[i]
SCREAMING_SNAKE_CASE__ = i == len(__lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE__ = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE__ = FlaxDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCamelCase )
for _ in range(self.layers_per_block ):
SCREAMING_SNAKE_CASE__ = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
if not is_final_block:
SCREAMING_SNAKE_CASE__ = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = down_blocks
SCREAMING_SNAKE_CASE__ = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE__ = block_out_channels[-1]
SCREAMING_SNAKE_CASE__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=__lowerCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE__ = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : float = 1.0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
SCREAMING_SNAKE_CASE__ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE__ = jnp.flip(__lowerCamelCase , axis=1 )
# 1. time
if not isinstance(__lowerCamelCase , jnp.ndarray ):
SCREAMING_SNAKE_CASE__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ = jnp.expand_dims(__lowerCamelCase , 0 )
SCREAMING_SNAKE_CASE__ = self.time_proj(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.time_embedding(__lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE__ = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE__ = self.conv_in(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE__ = self.controlnet_cond_embedding(__lowerCamelCase )
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE__ = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = down_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = down_block(__lowerCamelCase , __lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE__ = self.mid_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
        # 5. controlnet blocks
SCREAMING_SNAKE_CASE__ = ()
for down_block_res_sample, controlnet_block in zip(__lowerCamelCase , self.controlnet_down_blocks ):
SCREAMING_SNAKE_CASE__ = controlnet_block(__lowerCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE__ = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE__ = self.controlnet_mid_block(__lowerCamelCase )
# 6. scaling
SCREAMING_SNAKE_CASE__ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__lowerCamelCase , mid_block_res_sample=__lowerCamelCase )
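# Added illustration (hedged, not part of the original module): __call__ above
# promotes a plain scalar timestep to a batched array before the sinusoidal
# projection. A minimal standalone sketch of just that step, assuming jax is
# installed:
if __name__ == "__main__":
    import jax.numpy as jnp

    timesteps = 10  # a plain Python scalar, as a caller might pass it
    if not isinstance(timesteps, jnp.ndarray):
        timesteps = jnp.array([timesteps], dtype=jnp.int32)
    elif len(timesteps.shape) == 0:
        timesteps = jnp.expand_dims(timesteps.astype(jnp.float32), 0)
    print(timesteps.shape)  # -> (1,)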
| 218 |
import argparse
import copy
def generate_neighbours ( path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution ( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_00_00
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_00_00
    )
    return first_solution, distance_of_first_solution
def find_neighborhood ( solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        n_index = solution.index(n )
        for kn in solution[1:-1]:
            kn_index = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[n_index] = kn
            _tmp[kn_index] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search ( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main ( args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
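# Note (added; hedged): the file passed via -f is expected to list one weighted
# edge per line as "<node_a> <node_b> <distance>", for example:
#
#   a b 20
#   a c 18
#   b c 10
#
# generate_first_solution reads the start node with f.read(1), so node labels
# are assumed to be single characters.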
| 218 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result ( ) -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected ) == sorted(result )
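# Note (added): sorted() is applied to both edge lists because Kruskal's
# algorithm may emit the minimum spanning tree edges in any order; only the
# set of [u, v, weight] triples matters, not their ordering.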
| 193 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer ( BaseTransformer ):
    mode = '''sequence-classification'''
    def __init__( self,hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams,num_labels,self.mode )
def UpperCamelCase ( self,**__lowerCamelCase ):
return self.model(**__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
A__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
A__ = self(**__lowerCamelCase )
A__ = outputs[0]
A__ = self.trainer.lr_schedulers[0]['''scheduler''']
A__ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase ( self ):
A__ = self.hparams
A__ = processors[args.task]()
A__ = processor.get_labels()
for mode in ["train", "dev"]:
A__ = self._feature_file(__lowerCamelCase )
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''',__lowerCamelCase )
else:
logger.info('''Creating features from dataset file at %s''',args.data_dir )
A__ = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
A__ = convert_examples_to_features(
__lowerCamelCase,self.tokenizer,max_length=args.max_seq_length,label_list=self.labels,output_mode=args.glue_output_mode,)
logger.info('''Saving features into cached file %s''',__lowerCamelCase )
torch.save(__lowerCamelCase,__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = False ):
A__ = '''dev''' if mode == '''test''' else mode
A__ = self._feature_file(__lowerCamelCase )
logger.info('''Loading features from cached file %s''',__lowerCamelCase )
A__ = torch.load(__lowerCamelCase )
A__ = torch.tensor([f.input_ids for f in features],dtype=torch.long )
A__ = torch.tensor([f.attention_mask for f in features],dtype=torch.long )
A__ = torch.tensor([f.token_type_ids for f in features],dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
A__ = torch.tensor([f.label for f in features],dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
A__ = torch.tensor([f.label for f in features],dtype=torch.float )
return DataLoader(
TensorDataset(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ),batch_size=__lowerCamelCase,shuffle=__lowerCamelCase,)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
A__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
A__ = self(**__lowerCamelCase )
A__ , A__ = outputs[:2]
A__ = logits.detach().cpu().numpy()
A__ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
A__ = np.concatenate([x['''pred'''] for x in outputs],axis=0 )
if self.hparams.glue_output_mode == "classification":
A__ = np.argmax(__lowerCamelCase,axis=1 )
elif self.hparams.glue_output_mode == "regression":
A__ = np.squeeze(__lowerCamelCase )
A__ = np.concatenate([x['''target'''] for x in outputs],axis=0 )
A__ = [[] for _ in range(out_label_ids.shape[0] )]
A__ = [[] for _ in range(out_label_ids.shape[0] )]
A__ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task,__lowerCamelCase,__lowerCamelCase )}
A__ = dict(results.items() )
A__ = results
return ret, preds_list, out_label_list
def UpperCamelCase ( self,__lowerCamelCase ):
A__ , A__ , A__ = self._eval_end(__lowerCamelCase )
A__ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase ( self,__lowerCamelCase ):
A__ , A__ , A__ = self._eval_end(__lowerCamelCase )
A__ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser,root_dir ):
        BaseTransformer.add_model_specific_args(parser,root_dir )
        parser.add_argument(
            '''--max_seq_length''',default=128,type=int,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ),)
        parser.add_argument(
            '''--task''',default='''''',type=str,required=True,help='''The GLUE task to run''',)
        parser.add_argument(
            '''--gpus''',default=0,type=int,help='''The number of GPUs allocated for this, it is by default 0 meaning none''',)
parser.add_argument(
'''--overwrite_cache''',action='''store_true''',help='''Overwrite the cached training and evaluation sets''' )
return parser
def main( ):
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            '''./results''' , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
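# Hedged usage sketch (added): the script is meant to be launched from the
# command line; the file name and data path below are illustrative, and the
# full flag set comes from add_generic_args/BaseTransformer in lightning_base
# (not shown here):
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results --do_predict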
| 193 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits ( x , bits=BITS ):
    """Expects an image tensor ranging from 0 to 1; outputs a bit tensor ranging from -1 to 1."""
    device = x.device
    x = (x * 2_55).int().clamp(0 , 2_55 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b c h w -> b c 1 h w""" )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , """b c d h w -> b (c d) h w""" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal ( x , bits=BITS ):
    """Expects bits ranging from -1 to 1; outputs an image tensor ranging from 0 to 1."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b (c d) h w -> b c d h w""" , d=8 )
    dec = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
    return (dec / 2_55).clamp(0.0 , 1.0 )
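# Added round-trip check (hedged sketch): bits_to_decimal inverts
# decimal_to_bits up to the 8-bit quantization, i.e. the composition equals
# (x * 255).int() / 255 for inputs in [0, 1].
if __name__ == "__main__":
    _x = torch.rand(1 , 3 , 8 , 8 )
    assert torch.allclose(bits_to_decimal(decimal_to_bits(_x ) ) , (_x * 2_55).int() / 2_55 )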
def ddim_bit_scheduler_step ( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the notation below.
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else """cpu"""
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step ( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type="epsilon" , generator=None , return_dict : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , bit_scale : Optional[float] = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height : Optional[int] = 2_56 , width : Optional[int] = 2_56 , num_inference_steps : Optional[int] = 50 , generator : Optional[torch.Generator] = None , batch_size : Optional[int] = 1 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , )-> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
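# Hedged usage sketch (added): the pipeline wires a bit-space UNet to a DDIM
# or DDPM scheduler whose step function is monkey-patched above; the arguments
# below are illustrative and assume a UNet trained on bit-encoded images.
#
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   images = pipe(height=2_56, width=2_56, num_inference_steps=50).images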
| 357 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling ( data : dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost ( features : np.ndarray , target : np.ndarray ) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main ( ) -> None:
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 0 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6_3_7_8_1_3_7
def lowercase ( lat1 : float , lon1 : float , lat2 : float , lon2 : float ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Haversine formula
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
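if __name__ == "__main__":
    # Added usage example (hedged): San Francisco to Yosemite, the standard
    # coordinates from the upstream implementation; roughly 254 km.
    print(f"{lowercase(37.774856, -122.424227, 37.864742, -119.537521):0,.0f} meters")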
| 45 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 12 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
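# Note (added): at runtime _LazyModule replaces this module in sys.modules and
# defers the heavy imports above until an attribute such as RemBertModel is
# first accessed, while the TYPE_CHECKING branch keeps static type checkers
# aware of the real symbols.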
| 353 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor (ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images." )
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
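# Hedged usage sketch (added): the checkpoint name below is illustrative of a
# CLIPSeg checkpoint; the processor routes text to the tokenizer and images or
# visual prompts to the image processor.
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")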
| 22 | 0 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
class TFMTaModel (TFTaModel ):
    '''simple docstring'''
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaForConditionalGeneration (TFTaForConditionalGeneration ):
    '''simple docstring'''
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaEncoderModel (TFTaEncoderModel ):
    '''simple docstring'''
    model_type = "mt5"
    config_class = MTaConfig
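# Note (added): these TF mT5 classes reuse the T5 implementations unchanged;
# only model_type and the config class are overridden, so the architecture is
# identical to T5 apart from the mT5 configuration and vocabulary.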
| 31 |
def bfs ( graph, s, t, parent ):
    # Return True if the sink t is reachable from the source s in the residual graph.
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson ( graph, source, sink ):
    # This array is filled by BFS to store the augmenting path as parent pointers.
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
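# For the capacity matrix above (the classic CLRS example network), the
# maximum flow from source 0 to sink 5 is 23.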
| 236 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """apply_ocr""" ) )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __magic_name__ )
self.assertIsInstance(encoding.boxes , __magic_name__ )
# Test batched
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = LayoutLMvaImageProcessor()
from datasets import load_dataset
a = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
a = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
a = image_processing(__magic_name__ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
        a = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
a = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __magic_name__ )
self.assertListEqual(encoding.boxes , __magic_name__ )
# with apply_OCR = False
a = LayoutLMvaImageProcessor(apply_ocr=__magic_name__ )
a = image_processing(__magic_name__ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 347 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def canine_tokenizer( self ):
        '''simple docstring'''
        return CanineTokenizer.from_pretrained("""google/canine-s""" )
def lowerCamelCase__ ( self :Tuple , **__magic_name__ :Dict ):
'''simple docstring'''
a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
a = 1024
return tokenizer
@require_torch
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.canine_tokenizer
a = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.canine_tokenizer
        a = ["""Once there was a man.""", """He wrote a test in HuggingFace Transformers."""]
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
a = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = """ He is very happy, UNwant\u00E9d,running"""
a = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a = chr(0Xe_0_0_7 )
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
a = tokenizer.__class__.from_pretrained(__magic_name__ )
a = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a , a = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_5
a = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = chr(0Xe_0_0_5 )
a = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
a = tokenizer.tokenize(__magic_name__ )
a = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
a = 0Xe_0_0_6
a = chr(__magic_name__ )
a = [new_token_a]
a = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a = 0Xe_0_0_7
a = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
a = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = """hello world"""
if self.space_between_special_tokens:
a = """[CLS] hello world [SEP]"""
else:
a = input
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
a = """a"""
a = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
a = 0Xe_0_0_6
a = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
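
# Small sketch outside the test class: CANINE needs no vocabulary file because
# ids are raw Unicode code points, with private-use code points reserved for
# special tokens; 57344 (0xE000) and 57345 (0xE001) in the expected ids above
# are the [CLS] and [SEP] markers wrapping the character stream.
def _canine_codepoint_demo() -> None:
    assert [ord(c) for c in "Life"] == [76, 105, 102, 101]
    assert (0xE000, 0xE001) == (57344, 57345)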
| 347 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__UpperCAmelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
__UpperCAmelCase = "EGZWVONAHDCLFQMSIPJBYUKXTR"
__UpperCAmelCase = "FOBHMDKEXQNRAULPGSJVTYICZW"
__UpperCAmelCase = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
__UpperCAmelCase = "RMDJXFUWGISLHVTCQNKYPBEZOA"
__UpperCAmelCase = "SGLCPQWZHKXAREONTFBVIYJUDM"
__UpperCAmelCase = "HVSICLTYKQUBXDWAJZOMFGPREN"
__UpperCAmelCase = "RZWQHFMVDBKICJLNTUXAGYPSOE"
__UpperCAmelCase = "LFKIJODBEGAMQPXVUHYSTCZRWN"
__UpperCAmelCase = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # Tests the input string: it must be a string of even length so that
    # symmetric letter pairs can be made.
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
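
def _plugboard_demo() -> None:
    # Usage sketch (not in the original file): adjacent letters are paired
    # symmetrically, so the wiring works in both directions.
    assert _plugboard("AB") == {"A": "B", "B": "A"}
    assert _plugboard("") == {}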
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # NOTE: the original rotor selection was lost in this copy; this triple is
    # a reconstructed choice from the upstream script.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 299 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
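
def _quick_sort_demo() -> None:
    # Sketch: the array is sorted in place and the total number of element
    # comparisons is returned; the random pivot keeps it O(n log n) on average.
    data = [3, 1, 2]
    comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
    assert data == [1, 2, 3] and comparisons >= len(data) - 1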
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 241 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
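
def _trim_batch_demo() -> None:
    # Sketch: columns holding only the pad id across the whole batch are cut,
    # so a (2, 4) batch padded with 0 in its last two columns becomes (2, 2).
    batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
    assert trim_batch(batch, pad_token_id=0).shape == (2, 2)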
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
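
def _build_train_dataset(tokenizer, data_dir: str) -> Seq2SeqDataset:
    # Usage sketch with illustrative paths and length limits: the dataset
    # expects line-aligned {data_dir}/train.source and train.target files.
    return Seq2SeqDataset(tokenizer, data_dir, max_source_length=512, max_target_length=56, type_path="train")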
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
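
def _f1_demo() -> None:
    # Worked example: after normalization "the cat" -> ["cat"] and
    # "a cat sat" -> ["cat", "sat"], so precision = 1/1, recall = 1/2 and
    # F1 = 2 * (1 * 0.5) / (1 + 0.5) = 2/3.
    assert abs(f1_score("the cat", "a cat sat") - 2 / 3) < 1e-9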
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 8 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 8 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each vector in value_array, find the nearest dataset vector."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
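
def _similarity_demo() -> None:
    # Sketch: the nearest neighbour of [0, 1] among {[0, 0], [1, 1]} is the
    # first corner at distance 1; cosine similarity of parallel vectors is 1.
    dataset = np.array([[0.0, 0.0], [1.0, 1.0]])
    values = np.array([[0.0, 1.0]])
    ((vector, dist),) = similarity_search(dataset, values)
    assert dist == 1.0
    assert abs(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])) - 1.0) < 1e-9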
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__a = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__a = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so this needs to be in an else branch after the two previous regexes.
__a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__a = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
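
def _camel_case_demo() -> None:
    # Sketch: the regex splits on lower->upper transitions and before the last
    # capital of an acronym, which is how model prefixes get shortened below.
    assert camel_case_split("TableTransformer") == ["Table", "Transformer"]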
def get_frameworks_table() -> pd.DataFrame:
    # Dictionary from model prefix to model type.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure each model type has a processor class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__a = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 66 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : str , *_a : int , **_a : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : str , *_a : Tuple , **_a : List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Union[str, Any] , *_a : Tuple , **_a : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
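
# A rough, self-contained sketch of the dummy-object pattern used throughout
# this module (class and helper names below are illustrative assumptions, not
# the real API): each placeholder imports fine even without torch installed,
# but instantiation or any factory call raises a helpful backend error.
def _sketch_requires_backends(obj, backends):
    # Stand-in for `requires_backends`: fail on first use, not on import.
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {backends}")


class _SketchDummyModel:
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        _sketch_requires_backends(self, self._backends)

    @classmethod
    def from_config(cls, *args, **kwargs):
        _sketch_requires_backends(cls, cls._backends)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        _sketch_requires_backends(cls, cls._backends)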
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : str , *_a : int , **_a : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : Optional[int] , **_a : str ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Optional[Any] , *_a : List[str] , **_a : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Any , *_a : str , **_a : List[Any] ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : List[Any] , *_a : str , **_a : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : int , **_a : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Union[str, Any] , *_a : Any , **_a : str ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : str , *_a : Tuple , **_a : int ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : Tuple , **_a : str ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Tuple , *_a : Tuple , **_a : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Dict , *_a : Any , **_a : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : Any , **_a : Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Optional[Any] , *_a : List[str] , **_a : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : str , *_a : List[Any] , **_a : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : Dict , **_a : Any ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Dict , *_a : int , **_a : Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : Optional[Any] , **_a : Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : str , *_a : int , **_a : str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Dict , *_a : Optional[int] , **_a : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : int , *_a : Any , **_a : Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : List[Any] , *_a : List[Any] , **_a : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : int , *_a : List[str] , **_a : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Dict , *_a : Union[str, Any] , **_a : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Tuple , *_a : Any , **_a : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : List[str] , *_a : Any , **_a : int ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : int , *_a : Optional[Any] , **_a : Dict ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : int , *_a : str , **_a : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : List[str] , *_a : int , **_a : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : Dict , **_a : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : List[str] , **_a : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
def _lowerCAmelCase ( *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['torch'] )
def _lowerCAmelCase ( *_UpperCamelCase : Dict , **_UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['torch'] )
def _lowerCAmelCase ( *_UpperCamelCase : int , **_UpperCamelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['torch'] )
def _lowerCAmelCase ( *_UpperCamelCase : Dict , **_UpperCamelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['torch'] )
def _lowerCAmelCase ( *_UpperCamelCase : Optional[int] , **_UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['torch'] )
def _lowerCAmelCase ( *_UpperCamelCase : int , **_UpperCamelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['torch'] )
def _lowerCAmelCase ( *_UpperCamelCase : Any , **_UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
requires_backends(_UpperCamelCase , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : List[str] , *_a : Tuple , **_a : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Tuple , *_a : str , **_a : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : Optional[Any] , **_a : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : str , *_a : List[Any] , **_a : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : List[Any] , *_a : Optional[Any] , **_a : Dict ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : str , *_a : Tuple , **_a : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : List[str] , *_a : Any , **_a : Tuple ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Optional[Any] , *_a : Optional[int] , **_a : Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : str , *_a : int , **_a : List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : int , *_a : List[str] , **_a : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : List[Any] , **_a : Any ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : Union[str, Any] , **_a : Tuple ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : str , *_a : Optional[int] , **_a : Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : Any , **_a : Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : str , *_a : List[str] , **_a : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : List[Any] , *_a : Union[str, Any] , **_a : Dict ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : List[Any] , **_a : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : Any , **_a : int ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Optional[int] , *_a : str , **_a : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : int , *_a : Any , **_a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Dict , *_a : Dict , **_a : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : List[Any] , *_a : Optional[Any] , **_a : Optional[int] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Optional[Any] , *_a : Any , **_a : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : Optional[int] , **_a : List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Any , *_a : List[str] , **_a : int ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Any , *_a : str , **_a : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Tuple , *_a : str , **_a : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Union[str, Any] , *_a : List[Any] , **_a : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Optional[Any] , *_a : Dict , **_a : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Union[str, Any] , *_a : str , **_a : int ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Optional[Any] , *_a : Any , **_a : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Any , *_a : Dict , **_a : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Dict , *_a : Dict , **_a : str ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Optional[Any] , *_a : Optional[int] , **_a : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Any , *_a : List[Any] , **_a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Dict , *_a : str , **_a : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Tuple , *_a : str , **_a : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Tuple , *_a : Tuple , **_a : Dict ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : str , **_a : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : List[str] , *_a : str , **_a : Union[str, Any] ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : List[str] , *_a : List[str] , **_a : Tuple ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Optional[int] , *_a : Dict , **_a : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : str , *_a : Union[str, Any] , **_a : List[str] ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : str , *_a : Optional[int] , **_a : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Dict , *_a : int , **_a : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class A__ ( metaclass=A__ ):
A__ = ['torch']
def __init__( self : Optional[int] , *_a : int , **_a : Tuple ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A ( cls : Optional[Any] , *_a : Union[str, Any] , **_a : Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A ( cls : Union[str, Any] , *_a : Optional[Any] , **_a : int ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
# NOTE: the obfuscated source reused the single name `A__` for every class below
# (shadowing the metaclass of the same name) and duplicated the `_a` argument name
# in `*_a, **_a`, which is a SyntaxError. The distinct `TorchPlaceholder*` names
# are reconstructions -- the real class names were lost in obfuscation; the method
# names follow the standard dummy-object pattern (`__init__`, `from_config`,
# `from_pretrained`), each raising an informative error via `requires_backends`
# when PyTorch is not installed.
class TorchPlaceholder1(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder2(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder3(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder4(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder5(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder6(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder7(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder8(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder9(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder10(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder11(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder12(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder13(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder14(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder15(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder16(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder17(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder18(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder19(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder20(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder21(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder22(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder23(metaclass=A__):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
| 363 |
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
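# The replacement import, for reference:
#   from accelerate import find_executable_batch_size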
| 114 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # The two state dicts list parameters in the same order, so keys map positionally.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 6, 8],
            depths=[2, 3, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 8, 12],
            depths=[4, 4, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384],
            num_attention_heads=[3, 5, 6],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512],
            num_attention_heads=[4, 6, 8],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768],
            num_attention_heads=[6, 9, 12],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
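    # Example invocation (hypothetical script/output names):
    #   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
    #       --pytorch_dump_folder_path levit-dump-folder/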
| 124 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
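# The helpers below build (huggingface_key, original_key) pairs that map Hugging Face
# parameter names onto the keys used in the original CvT checkpoints.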
def embeddings(idx):
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
    parser.add_argument(
        "--cvt_file_name",
        default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint file.",
    )
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
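    # Example invocation (hypothetical script/checkpoint names; assumes the weights
    # from the zoo link above were downloaded locally):
    #   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py --cvt_model cvt-w24 \
    #       --image_size 384 --cvt_file_name ./CvT-w24-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-w24-384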
| 50 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
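# Slow (SentencePiece-based) tokenizer for BARTpho: a multilingual SentencePiece
# model paired with a reduced monolingual Vietnamese vocabulary ("dict.txt").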
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
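# Usage sketch (illustrative Vietnamese input; downloads the vocab files on first use):
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]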
| 150 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
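# Callers typically guard the build, since compiling the extension requires a CUDA
# toolchain (a sketch, not part of the original file):
#   try:
#       MSDA = load_cuda_kernels()
#   except Exception:
#       MSDA = None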
| 150 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
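# Usage sketch:
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   tokenizer("J'aime le camembert !")["input_ids"]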
| 277 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a_ :Any = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and ลukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a_ :List[str] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a_ :List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
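# Usage sketch (hypotheses/references are lists of token lists, as in the docstring examples):
#   google_bleu = datasets.load_metric("google_bleu")
#   google_bleu.compute(predictions=hypotheses, references=list_of_references)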
| 277 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Decode kwargs must be tested with a real checkpoint: the dummy models' tokenizers
        # are not aligned with their models, so `skip_special_tokens=True` has no effect there.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
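# Typical non-test use of the streamers (sketch):
#   streamer = TextStreamer(tokenizer, skip_prompt=True)
#   model.generate(**inputs, streamer=streamer)  # prints text as it is generated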
| 345 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating `BertConfig` with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
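# Usage sketch:
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)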
| 345 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
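# The fast tests below assemble a miniature UNet/VAE/CLIP stack so the full
# text-to-video pipeline can run on CPU in seconds.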
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
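# Outside of tests, the pipeline is driven the same way (sketch; assumes a CUDA GPU):
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
#   frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="pt").frames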
| 306 | 0 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 259 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the shift of `message` for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
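# Hedged companion sketch (added; `encrypt_with_key` is not part of the original
# script): the forward Caesar shift, so the brute force above can be checked
# round-trip. encrypt_with_key("HELLO", 3) == "KHOOR", and decrypt("KHOOR")
# prints "HELLO" at Key #3.
def encrypt_with_key(message: str, key: int) -> str:
    result = ""
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            # shift forward by `key`, wrapping around the 26-letter alphabet
            num = (string.ascii_uppercase.find(symbol) + key) % len(string.ascii_uppercase)
            result += string.ascii_uppercase[num]
        else:
            result += symbol
    return result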
| 259 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ :
"""simple docstring"""
def __init__( self : str ,lowercase__ : Optional[Any] ,lowercase__ : int=3 ,lowercase__ : Any=3_2 ,lowercase__ : List[str]=3 ,lowercase__ : Optional[int]=1_0 ,lowercase__ : Any=[1_0, 2_0, 3_0, 4_0] ,lowercase__ : Union[str, Any]=[1, 1, 2, 1] ,lowercase__ : str=True ,lowercase__ : Optional[Any]=True ,lowercase__ : List[Any]="relu" ,lowercase__ : Any=3 ,lowercase__ : Any=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = embeddings_size
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_act
__lowercase = num_labels
__lowercase = scope
__lowercase = len(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int ):
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,)
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : List[Any] ):
__lowercase = RegNetModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) ,)
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : List[str] ):
__lowercase = self.num_labels
__lowercase = RegNetForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.prepare_config_and_inputs()
__lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ (_A , _A , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : List[Any] = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = RegNetModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(config=lowercase__ )
for name, module in model.named_modules():
if isinstance(lowercase__ ,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) ,msg=F"Parameter {name} of model {model_class} seems not properly initialized" ,)
self.assertTrue(
torch.all(module.bias == 0 ) ,msg=F"Parameter {name} of model {model_class} seems not properly initialized" ,)
def SCREAMING_SNAKE_CASE ( self : str ):
def check_hidden_states_output(lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ):
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(lowercase__ ) ,expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,)
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowercase = layer_type
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = RegNetModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _A ( ):
"""simple docstring"""
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self : Any ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__lowercase = model(**lowercase__ )
# verify the logits
__lowercase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape ,lowercase__ )
__lowercase = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
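# Hedged usage sketch (added; mirrors the integration test above and reuses its
# checkpoint, REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0], outside the unittest harness):
def _regnet_top1_demo() -> str:
    processor = AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000): ImageNet classes
    return model.config.id2label[int(logits.argmax(-1))]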
| 104 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
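# Hedged cross-check (added; not part of the original solution): the same value
# follows from the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


# solution_closed_form(100) == solution() == 25164150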
| 33 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : Optional[Any] = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _snake_case ( lowerCamelCase__ ):
_lowercase : Tuple = 'levit'
def __init__( self , a=224 , a=3 , a=3 , a=2 , a=1 , a=16 , a=[128, 256, 384] , a=[4, 8, 12] , a=[4, 4, 4] , a=[16, 16, 16] , a=0 , a=[2, 2, 2] , a=[2, 2, 2] , a=0.02 , **a , ) -> Tuple:
super().__init__(**a)
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = kernel_size
SCREAMING_SNAKE_CASE = stride
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _snake_case ( lowerCamelCase__ ):
_lowercase : List[str] = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
return 1E-4
| 356 |
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 327 | 0 |
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Write the message in a zigzag over `key` rails, then read the rails row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Rebuild the zigzag template, slice the ciphertext into rails, and read diagonally."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Use decrypt() with every possible key and collect the candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
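# Worked example (added for illustration; the classic 3-rail case):
# >>> encrypt("WEAREDISCOVEREDFLEEATONCE", 3)
# 'WECRLTEERDSOEEFEAOCAIVDEN'
# >>> decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3)
# 'WEAREDISCOVEREDFLEEATONCE'
# >>> bruteforce("WECRLTEERDSOEEFEAOCAIVDEN")[3]
# 'WEAREDISCOVEREDFLEEATONCE'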
| 91 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def make_batched(videos):
    """Normalize the accepted video layouts to a list of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
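# Quick self-check (added for illustration): exercises the three accepted input
# layouts, a single frame, one video as a list of frames, and a batch of videos.
def _make_batched_demo() -> None:
    frame = np.zeros((16, 16, 3), dtype=np.uint8)
    assert len(make_batched(frame)) == 1  # single image -> batch of one video
    assert len(make_batched([frame, frame])[0]) == 2  # list of frames -> one video
    assert len(make_batched([[frame], [frame]])) == 2  # batch of videos, unchanged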
class UpperCamelCase__( __A ):
lowerCAmelCase__ : List[Any] = ['pixel_values']
def __init__( self ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = True ,__UpperCAmelCase = 1 / 2_55 ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
super().__init__(**__UpperCAmelCase )
A__ = size if size is not None else {'shortest_edge': 2_56}
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
A__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
A__ = get_size_dict(__UpperCAmelCase ,param_name='crop_size' )
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = offset
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
if "shortest_edge" in size:
A__ = get_resize_output_image_size(__UpperCAmelCase ,size['shortest_edge'] ,default_to_square=__UpperCAmelCase )
elif "height" in size and "width" in size:
A__ = (size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCAmelCase ,size=(size['height'], size['width']) ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> Optional[Any]:
A__ = image.astype(np.floataa )
if offset:
A__ = image - (scale / 2)
return rescale(__UpperCAmelCase ,scale=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
return normalize(__UpperCAmelCase ,mean=__UpperCAmelCase ,std=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
A__ = to_numpy_array(__UpperCAmelCase )
if do_resize:
A__ = self.resize(image=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase )
if do_center_crop:
A__ = self.center_crop(__UpperCAmelCase ,size=__UpperCAmelCase )
if do_rescale:
A__ = self.rescale(image=__UpperCAmelCase ,scale=__UpperCAmelCase ,offset=__UpperCAmelCase )
if do_normalize:
A__ = self.normalize(image=__UpperCAmelCase ,mean=__UpperCAmelCase ,std=__UpperCAmelCase )
A__ = to_channel_dimension_format(__UpperCAmelCase ,__UpperCAmelCase )
return image
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,**__UpperCAmelCase ,) -> PIL.Image.Image:
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = offset if offset is not None else self.offset
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(__UpperCAmelCase ,param_name='crop_size' )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
A__ = make_batched(__UpperCAmelCase )
A__ = [
[
self._preprocess_image(
image=__UpperCAmelCase ,do_resize=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ,do_center_crop=__UpperCAmelCase ,crop_size=__UpperCAmelCase ,do_rescale=__UpperCAmelCase ,rescale_factor=__UpperCAmelCase ,offset=__UpperCAmelCase ,do_normalize=__UpperCAmelCase ,image_mean=__UpperCAmelCase ,image_std=__UpperCAmelCase ,data_format=__UpperCAmelCase ,)
for img in video
]
for video in videos
]
A__ = {'pixel_values': videos}
return BatchFeature(data=__UpperCAmelCase ,tensor_type=__UpperCAmelCase )
| 221 | 0 |
"""simple docstring"""
def count_inversions_bf(arr):
    """Count inversions by checking every pair: O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via merge sort, O(n log n). Returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
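# Randomized cross-check (added for illustration): the recursive version also
# returns the merged, sorted array, so both of its outputs can be validated.
def _self_check(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        arr = [random.randint(0, 50) for _ in range(random.randint(0, 30))]
        merged, count = count_inversions_recursive(arr)
        assert merged == sorted(arr)
        assert count == count_inversions_bf(arr)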
| 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ShapEImgaImgPipeline
UpperCAmelCase__ : int = ["image"]
UpperCAmelCase__ : List[str] = ["image"]
UpperCAmelCase__ : int = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase__ : List[Any] = False
@property
def _a ( self ) -> Tuple:
return 32
@property
def _a ( self ) -> int:
return 32
@property
def _a ( self ) -> Optional[int]:
return self.time_input_dim * 4
@property
def _a ( self ) -> Any:
return 8
@property
def _a ( self ) -> int:
torch.manual_seed(0 )
__UpperCamelCase =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__UpperCamelCase =CLIPVisionModel(A_ )
return model
@property
def _a ( self ) -> List[Any]:
__UpperCamelCase =CLIPImageProcessor(
crop_size=224 , do_center_crop=A_ , do_normalize=A_ , do_resize=A_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
@property
def _a ( self ) -> str:
torch.manual_seed(0 )
__UpperCamelCase ={
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__UpperCamelCase =PriorTransformer(**A_ )
return model
@property
def _a ( self ) -> int:
torch.manual_seed(0 )
__UpperCamelCase ={
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__UpperCamelCase =ShapERenderer(**A_ )
return model
def _a ( self ) -> List[str]:
__UpperCamelCase =self.dummy_prior
__UpperCamelCase =self.dummy_image_encoder
__UpperCamelCase =self.dummy_image_processor
__UpperCamelCase =self.dummy_renderer
__UpperCamelCase =HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=A_ , clip_sample=A_ , clip_sample_range=1.0 , )
__UpperCamelCase ={
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def _a ( self , A_ , A_=0 ) -> Any:
__UpperCamelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
__UpperCamelCase =torch.manual_seed(A_ )
else:
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase ={
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def _a ( self ) -> str:
__UpperCamelCase ='cpu'
__UpperCamelCase =self.get_dummy_components()
__UpperCamelCase =self.pipeline_class(**A_ )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase =output.images[0]
__UpperCamelCase =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__UpperCamelCase =np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> int:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> List[str]:
__UpperCamelCase =torch_device == 'cpu'
__UpperCamelCase =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=A_ , relax_max_difference=A_ , )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_dummy_components()
__UpperCamelCase =self.pipeline_class(**A_ )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =1
__UpperCamelCase =2
__UpperCamelCase =self.get_dummy_inputs(A_ )
for key in inputs.keys():
if key in self.batch_params:
__UpperCamelCase =batch_size * [inputs[key]]
__UpperCamelCase =pipe(**A_ , num_images_per_prompt=A_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> int:
__UpperCamelCase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__UpperCamelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__UpperCamelCase =ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(0 )
__UpperCamelCase =pipe(
A_ , generator=A_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(A_ , A_ )
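# Follow-up sketch (added; assumes diffusers.utils.export_to_gif is available in
# the installed diffusers version): the rendered frames can be written out as an
# animation for visual inspection.
#
# from diffusers.utils import export_to_gif
# export_to_gif(images, "corgi_3d.gif")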
| 62 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256"""
SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
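# Hedged usage sketch (added; checkpoint name taken from the slow test above):
def _karras_ve_demo():
    unet = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    generator = torch.manual_seed(0)
    # 20 steps of the stochastic churn sampler from Karras et al. (2022)
    return pipe(num_inference_steps=20, generator=generator, output_type="numpy").images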
| 296 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # Greedy fractional knapsack: sort items by value/weight ratio, take whole
    # items while they fit, then a fraction of the first item that does not.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
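# Worked example (added for illustration): values [60, 100, 120], weights
# [10, 20, 30], capacity 50 -> take items 1 and 2 whole (value 160, weight 30),
# then 20/30 of item 3 for another 80, giving the classic optimum:
# >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# 240.0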
| 107 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__lowerCAmelCase = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__lowerCAmelCase = dict(zip(vocab, range(len(vocab))))
__lowerCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(tmpdirname)
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__lowerCAmelCase = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__lowerCAmelCase = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__lowerCAmelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__lowerCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__lowerCAmelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
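# Usage sketch (added; the hub id matches the comment near the top of this script):
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")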
| 107 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = ["""image_processor""", """tokenizer"""]
__lowercase = """CLIPImageProcessor"""
__lowercase = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase_ , )
_snake_case = kwargs.pop('feature_extractor' )
_snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case = self.tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
if images is not None:
_snake_case = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None and images is not None:
_snake_case = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ )
def lowerCamelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.tokenizer.model_input_names
_snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 42 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowercase : List[str] = logging.get_logger("transformers.models.speecht5")
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Dict:
hf_model.apply_weight_norm()
_snake_case = checkpoint['input_conv.weight_g']
_snake_case = checkpoint['input_conv.weight_v']
_snake_case = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
_snake_case = checkpoint[F'upsamples.{i}.1.weight_g']
_snake_case = checkpoint[F'upsamples.{i}.1.weight_v']
_snake_case = checkpoint[F'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_snake_case = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
_snake_case = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
_snake_case = checkpoint[F'blocks.{i}.convs1.{j}.1.bias']
_snake_case = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
_snake_case = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
_snake_case = checkpoint[F'blocks.{i}.convs2.{j}.1.bias']
_snake_case = checkpoint['output_conv.1.weight_g']
_snake_case = checkpoint['output_conv.1.weight_v']
_snake_case = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A=None , __A=None , ) -> List[Any]:
if config_path is not None:
_snake_case = SpeechTaHifiGanConfig.from_pretrained(__A )
else:
_snake_case = SpeechTaHifiGanConfig()
_snake_case = SpeechTaHifiGan(__A )
_snake_case = torch.load(__A )
load_weights(orig_checkpoint['model']['generator'] , __A , __A )
_snake_case = np.load(__A )
_snake_case = stats[0].reshape(-1 )
_snake_case = stats[1].reshape(-1 )
_snake_case = torch.from_numpy(__A ).float()
_snake_case = torch.from_numpy(__A ).float()
model.save_pretrained(__A )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__A )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the ๐ค hub."
)
lowercase : List[Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
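# Example invocation (added; the script filename and all paths are placeholders):
# python convert_hifigan_checkpoint.py \
#     --checkpoint_path ./generator.ckpt \
#     --stats_path ./stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan \
#     --push_to_hub my-username/speecht5_hifigan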
| 42 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : Any ={
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class a_ ( a__ ):
__A = "audio-spectrogram-transformer"
def __init__( self : List[Any] , lowercase : Dict=768 , lowercase : Tuple=12 , lowercase : Any=12 , lowercase : Optional[Any]=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Dict=0.0 , lowercase : Dict=0.0 , lowercase : int=0.02 , lowercase : str=1e-1_2 , lowercase : Dict=16 , lowercase : str=True , lowercase : Dict=10 , lowercase : Optional[Any]=10 , lowercase : Optional[int]=1_024 , lowercase : List[str]=128 , **lowercase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Dict = hidden_size
lowercase_ :int = num_hidden_layers
lowercase_ :Optional[int] = num_attention_heads
lowercase_ :int = intermediate_size
lowercase_ :List[Any] = hidden_act
lowercase_ :Any = hidden_dropout_prob
lowercase_ :Dict = attention_probs_dropout_prob
lowercase_ :int = initializer_range
lowercase_ :Union[str, Any] = layer_norm_eps
lowercase_ :Any = patch_size
lowercase_ :Optional[Any] = qkv_bias
lowercase_ :Optional[Any] = frequency_stride
lowercase_ :Optional[int] = time_stride
lowercase_ :Dict = max_length
lowercase_ :Union[str, Any] = num_mel_bins
| 370 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a_ ( _lowerCAmelCase ):
__A = ["image_processor", "tokenizer"]
__A = "LayoutLMv3ImageProcessor"
__A = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self : int , lowercase : Optional[Any]=None , lowercase : List[str]=None , **lowercase : Optional[int] ):
"""simple docstring"""
lowercase_ :int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase , )
lowercase_ :Optional[int] = kwargs.pop("feature_extractor" )
lowercase_ :Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase , lowercase )
def __call__( self : Optional[Any] , lowercase : List[str] , lowercase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase : Union[List[List[int]], List[List[List[int]]]] = None , lowercase : Optional[Union[List[int], List[List[int]]]] = None , lowercase : bool = True , lowercase : Union[bool, str, PaddingStrategy] = False , lowercase : Union[bool, str, TruncationStrategy] = None , lowercase : Optional[int] = None , lowercase : int = 0 , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = True , lowercase : Optional[Union[str, TensorType]] = None , **lowercase : List[Any] , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
lowercase_ :Dict = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
lowercase_ :str = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase_ :Union[str, Any] = features["words"]
lowercase_ :Optional[Any] = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
lowercase_ :Any = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowercase_ :Any = self.get_overflowing_images(lowercase , encoded_inputs["overflow_to_sample_mapping"] )
lowercase_ :Any = images
return encoded_inputs
def lowercase__ ( self : List[Any] , lowercase : Any , lowercase : Optional[Any] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def lowercase__ ( self : Union[str, Any] , *lowercase : List[Any] , **lowercase : Optional[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def lowercase__ ( self : List[Any] , *lowercase : Any , **lowercase : str ):
"""simple docstring"""
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def lowercase__ ( self : str ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase , )
return self.image_processor_class
@property
def lowercase__ ( self : Tuple ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase , )
return self.image_processor
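# Hedged usage sketch (added; the checkpoint name is the standard public one, not
# taken from this file): with the image processor's default apply_ocr=True, words
# and boxes come from OCR, so only images (plus optional text) are passed in.
#
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(images=image, return_tensors="pt")
# encoding contains: input_ids, bbox, attention_mask, pixel_values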
| 147 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse launcher arguments: TPU core count, the training script, and its args."""
_lowerCAmelCase : Optional[Any] = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=_lowerCamelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=_lowerCamelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=_lowerCamelCase )
return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
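# Example invocation (added; the script name and training arguments are placeholders):
# python xla_spawn.py --num_cores 8 path/to/train_script.py --arg1 value1 --arg2 value2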
| 36 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
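# Example invocation (added): run the bundled end-to-end sanity script, optionally
# against a specific config file.
#
#   accelerate test
#   accelerate test --config_file path/to/config.yaml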
| 106 | 0 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class A__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = StableDiffusionControlNetImgaImgPipeline
A_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
A_ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'})
A_ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
__lowerCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__lowerCAmelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowerCAmelCase : Optional[Any] = CLIPTextModel(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
    def test_attention_slicing_forward_pass(self):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
    def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
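    # Context (hedged): `control_guidance_start`/`control_guidance_end` bound the fraction of
    # denoising steps during which each ControlNet is active, so different windows (scalar or
    # per-controlnet lists) are expected to change the generated image.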
    def test_attention_slicing_forward_pass(self):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
    def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2 | 182 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_prior(self):
torch.manual_seed(0 )
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs)
return model
@property
    def dummy_renderer(self):
torch.manual_seed(0 )
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy" )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image) | 182 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )
    def do_checks(self, output_dir):
        pass
    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, quality_checks: bool = True, fp16: bool = True, ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True, ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=True)
        args = f'''\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        '''.split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
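    # Illustrative note (hedged): with distributed=True on a 2-GPU machine this yields
    # ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"], which run_trainer prepends
    # to the run_asr.py script path, its CLI arguments, and the --deepspeed config args.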
| 118 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
    def _no_print_statements(self, filepath: str):
        r"""Find all instances where a python script file contains a `print` statement."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")
    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 201 | 0 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_dpr_ctx_encoder_tokenizer(self):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_bart_tokenizer(self):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
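        # Note (hedged arithmetic): with METRIC_INNER_PRODUCT, a query of np.ones(8) scores
        # 1 * 1 * 8 = 8 against the first embedding (ones) and 1 * 2 * 8 = 16 against the
        # second (2 * ones), which is why the retrieve tests below expect doc "1" ranked first.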
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        ctx_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)  # in-memory index assumed here
        retriever.set_ctx_encoder_tokenizer(ctx_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True )  # check for doc token related keys in dictionary.
| 27 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
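# For example (illustrative commands, assuming this file is saved as local_sgd.py):
#   accelerate launch local_sgd.py                       # single CPU/GPU
#   accelerate launch --num_processes 2 local_sgd.py     # 2-process distributed run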
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD" )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 35 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.", FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
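# Hedged usage sketch (illustrative only; `num_labels` is a generic PretrainedConfig kwarg):
if __name__ == "__main__":
    config = SegformerConfig(num_labels=150)
    onnx_config = SegformerOnnxConfig(config)
    print(list(onnx_config.inputs))  # ["pixel_values"], with dynamic batch/channel/height/width axes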
| 287 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
return chosen_vertices
def get_edges(graph: dict) -> set:
    edges = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
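    # Hedged worked example: for the graph above, popping the edge (0, 1) first would put
    # {0, 1} in the cover and discard every edge touching 0 or 1; the loop repeats until no
    # edges remain. Since both endpoints of a matching are kept, the returned cover is at
    # most twice the size of a minimum vertex cover (the classic 2-approximation).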
| 36 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
def _lowerCAmelCase ( self ) -> Dict:
return ["input_ids", "attention_mask", "pixel_values"]
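# Hedged usage sketch (checkpoint name and image are illustrative):
# processor = GitProcessor.from_pretrained("microsoft/git-base")
# batch = processor(text="a photo of a cat", images=pil_image, return_tensors="pt")
# batch.keys()  # -> input_ids, attention_mask, pixel_values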
| 36 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
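# Hedged usage sketch (requires the `vision` extra and network access for the checkpoint):
# from PIL import Image
# tool = ImageCaptioningTool()
# caption = tool(Image.open("photo.jpg"))  # -> e.g. "a photograph of ..."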
| 60 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values

    logger.info('''Converting model...''' )

    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )

    # rename keys
    state_dict = rename_keys(state_dict )

    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()

    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(f'''Unknown model name: {model_name}''' )

        expected_shape = torch.Size([1, 480, 640] )

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print('''Looks ok!''' )

    # finally, push to hub if required
    if push_to_hub:
        logger.info('''Pushing model and image processor to the hub...''' )

        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
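# Illustrative Python-level invocation (all paths are placeholders; normally the
# argparse CLI above is used instead):
#
#   convert_glpn_checkpoint(
#       checkpoint_path="./glpn_kitti.pth",
#       pytorch_dump_folder_path="./glpn-kitti",
#       push_to_hub=False,
#       model_name="glpn-kitti",
#   )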
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tapas'''] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_tapas'''] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
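# A quick sketch of what the lazy pattern buys (illustrative, not part of the
# module): importing the package is cheap, and the heavy torch/tf submodules are
# only imported when one of their attributes is first accessed, e.g.:
#
#   from transformers.models.tapas import TapasConfig  # triggers the real import
#   config = TapasConfig()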
| 245 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    """Configuration class to store the configuration of a Mask2Former model."""

    model_type = """mask2former"""
    backbones_supported = ["""swin"""]
    attribute_map = {"""hidden_size""": """hidden_dim"""}

    def __init__( self , backbone_config=None , feature_size=256 , mask_feature_size=256 , hidden_dim=256 , encoder_feedforward_dim=1024 , activation_function="relu" , encoder_layers=6 , decoder_layers=10 , num_attention_heads=8 , dropout=0.0 , dim_feedforward=2048 , pre_norm=False , enforce_input_projection=False , common_stride=4 , ignore_value=255 , num_queries=100 , no_object_weight=0.1 , class_weight=2.0 , mask_weight=5.0 , dice_weight=5.0 , train_num_points=12544 , oversample_ratio=3.0 , importance_sample_ratio=0.75 , init_std=0.02 , init_xavier_std=1.0 , use_auxiliary_loss=True , feature_strides=[4, 8, 16, 32] , output_auxiliary_logits=None , **kwargs , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__UpperCamelCase : Optional[int] =CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__UpperCamelCase : List[str] =backbone_config.pop('model_type' )
__UpperCamelCase : str =CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase : List[Any] =config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
__UpperCamelCase : Dict =backbone_config
__UpperCamelCase : Optional[int] =feature_size
__UpperCamelCase : Union[str, Any] =mask_feature_size
__UpperCamelCase : Tuple =hidden_dim
__UpperCamelCase : Optional[int] =encoder_feedforward_dim
__UpperCamelCase : Optional[int] =activation_function
__UpperCamelCase : Dict =encoder_layers
__UpperCamelCase : List[Any] =decoder_layers
__UpperCamelCase : int =num_attention_heads
__UpperCamelCase : Optional[Any] =dropout
__UpperCamelCase : int =dim_feedforward
__UpperCamelCase : Any =pre_norm
__UpperCamelCase : Union[str, Any] =enforce_input_projection
__UpperCamelCase : str =common_stride
__UpperCamelCase : List[str] =ignore_value
__UpperCamelCase : Optional[int] =num_queries
__UpperCamelCase : Any =no_object_weight
__UpperCamelCase : int =class_weight
__UpperCamelCase : str =mask_weight
__UpperCamelCase : Dict =dice_weight
__UpperCamelCase : str =train_num_points
__UpperCamelCase : str =oversample_ratio
__UpperCamelCase : int =importance_sample_ratio
__UpperCamelCase : List[str] =init_std
__UpperCamelCase : Union[str, Any] =init_xavier_std
__UpperCamelCase : Any =use_auxiliary_loss
__UpperCamelCase : Tuple =feature_strides
__UpperCamelCase : Dict =output_auxiliary_logits
__UpperCamelCase : Union[str, Any] =decoder_layers
super().__init__(**lowerCamelCase__ )
    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
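# Minimal construction sketch (illustrative; assumes the public transformers API
# mirrors the class above):
#
#   from transformers import Mask2FormerConfig, SwinConfig
#   backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = Mask2FormerConfig.from_backbone_config(backbone)
#   assert config.backbone_config.model_type == "swin"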
| 245 | 1 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig( PretrainedConfig ):
    '''A class replicating the BertConfig with additional pruning parameters.'''

    model_type = """masked_bert"""

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
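# Illustrative sketch (values are arbitrary examples, not recommended settings):
# configure "topK" pruning with a constant mask initialization.
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)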
| 229 | '''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct , x , y , z = symbols('''ct x y z''')
def beta( velocity: float ) -> float:
    """Calculate beta = velocity / c for a given velocity in m/s."""
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""" )
    return velocity / c
def gamma( velocity: float ) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix( velocity: float ) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix for a boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform( velocity: float , event: np.ndarray | None = None ) -> np.ndarray:
    """Apply the Lorentz boost to a four-vector; a symbolic event is built when none is given."""
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
four_vector = transform(29979245)
print('''Example of four vector: ''')
print(f'ct\' = {four_vector[0]}')
print(f'x\' = {four_vector[1]}')
print(f'y\' = {four_vector[2]}')
print(f'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
sub_dict = {ct: c, x: 1, y: 1, z: 1}
numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'\n{numerical_vector}')
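# Numeric sanity check (illustrative, not part of the original module): at half
# the speed of light the Lorentz factor is 1 / sqrt(1 - 0.5**2) ~= 1.1547.
print(f'\ngamma(0.5c) = {gamma(0.5 * c):.4f}')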
| 229 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )

    metric = evaluate.load("""glue""" , """mrpc""" )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather((predictions, batch["""labels"""]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """ between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"""
        """ and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
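# Equivalent higher-level sketch (assumes a recent accelerate release): the
# manual last-batch truncation done inside the eval loop above can be replaced
# by a single call:
#
#   predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#   metric.add_batch(predictions=predictions, references=references)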
| 129 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig( PretrainedConfig ):
    model_type = """deformable_detr"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1_024 , encoder_layers=6 , encoder_ffn_dim=1_024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1_024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) -> int:
        return self.d_model

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
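# Minimal construction sketch (illustrative; assumes the public transformers API):
# two-stage mode requires box refinement, as enforced in __init__ above.
#
#   from transformers import DeformableDetrConfig
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)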
| 129 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCamelCase__: Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__: int = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__: Tuple = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
UpperCamelCase__: List[Any] = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
UpperCamelCase__: List[Any] = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast( PreTrainedTokenizerFast ):
    """Construct a "fast" RoFormer tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or pre_tok_state.get('''strip_accents''' , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''lowercase'''] = do_lower_case
            pre_tok_state['''strip_accents'''] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )

        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''_tokenizer'''].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__['''_tokenizer'''].get_vocab()
        self.__dict__['''_tokenizer'''].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
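# Illustrative usage sketch (downloads tokenizer files, so left commented out):
# the Jieba pre-tokenizer installed above makes the fast tokenizer segment
# Chinese text at the word level rather than per character.
#
#   tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tok.tokenize("今天天气非常好。"))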
| 23 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float('nan')
class Tee:
    '''
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    '''

    def __init__( self , filename ):
        self.stdout = sys.stdout
        self.file = open(filename , "a" )

    def __getattr__( self , attr ):
        return getattr(self.stdout , attr )

    def write( self , msg ):
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r" , "" , msg , 0 , re.M ) )
def get_original_command(max_width=80 , full_python_path=False ):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f"{key}={val}" )

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/" )[-1]
    cmd.append(python )

    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd ) > 0:
        current_line += f"{cmd.pop(0 )} "
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ""
    return "\\\n".join(lines )
def get_base_command(args , output_dir ):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+" , " " , args.base_cmd )

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+" , "" , args.base_cmd )
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+" , "" , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single(id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )

    result = subprocess.run(cmd , capture_output=True , text=True )

    if verbose:
        print("STDOUT" , result.stdout )
        print("STDERR" , result.stderr )

    # save the streams
    prefix = variation.replace(" " , "-" )
    with open(Path(output_dir ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
        f.write(result.stderr )

    if result.returncode != 0:
        if verbose:
            print("failed" )
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
        metrics = json.load(f )

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f"{outcome} {mean_target}"
        if len(results ) > 1:
            results_str += f" {tuple(round(x , 2 ) for x in results )}"
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    df = pd.DataFrame(results )
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis="columns" , )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis="columns" )  # reorder cols

    # capitalize
    df = df.rename(str.capitalize , axis="columns" )

    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
    df_console = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt=".2f" )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt=".2f" )]

    print("\n\n".join(report ) )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd" , default=None , type=str , required=True , help="Base cmd" , )
    parser.add_argument(
        "--variations" , default=None , type=str , nargs="+" , required=True , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
    parser.add_argument(
        "--base-variation" , default=None , type=str , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
    parser.add_argument(
        "--target-metric-key" , default=None , type=str , required=True , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
    parser.add_argument(
        "--report-metric-keys" , default="" , type=str , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
    parser.add_argument(
        "--repeat-times" , default=1 , type=int , help="How many times to re-run each variation - an average will be reported" , )
    parser.add_argument(
        "--output_dir" , default="output_benchmark" , type=str , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
    parser.add_argument(
        "--verbose" , default=False , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )

    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r"\|" , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(" ".join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
    print(f"and this script's output is also piped into {report_fn}" )

    sys.stdout = Tee(report_fn )

    print(f"\n*** Running {len(variations )} benchmarks:" )
    print(f"Base command: {' '.join(base_cmd )}" )

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations , desc="Total completion: " , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )

    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    """Copy an S3PRL sequence-classification head into a HF model."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    """Copy an S3PRL frame-classification (diarization) head into a HF model."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    """Copy an S3PRL x-vector (speaker verification) head into a HF model."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""connector.weight"""]
    model.projector.bias.data = downstream_dict["""connector.bias"""]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    """Load an S3PRL downstream checkpoint and convert it to a HuggingFace model."""
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    downstream_dict = checkpoint["""Downstream"""]

    hf_config = Wav2Vec2Config.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )

    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification""" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForAudioFrameClassification""" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForXVector""" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]

    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
UpperCamelCase__ : Any = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
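# Illustrative Python-level invocation (all paths and model ids are placeholders):
#
#   convert_s3prl_checkpoint(
#       base_model_name="facebook/wav2vec2-base",
#       config_path="./config.json",
#       checkpoint_path="./s3prl_checkpoint.ckpt",
#       model_dump_path="./converted_model",
#   )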
| 368 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )

    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("""numpy.random.Generator""" ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )

        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(2_0).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_0_0).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
| 164 | 0 |
import pytest
lowercase__ : Optional[int] = "__dummy_dataset1__"
lowercase__ : str = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
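# A minimal usage sketch (assumption, not part of the original file): pytest injects
# fixtures by argument name, so a test can load the generated dummy dataset directly:
#
#     from datasets import load_dataset
#
#     def test_dummy_dataset(dataset_loading_script_dir):
#         dataset = load_dataset(dataset_loading_script_dir, split="train")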
| 328 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase__ : Union[str, Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
lowercase__ : Optional[Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
lowercase__ : Any = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
lowercase__ : Optional[int] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
lowercase__ : Optional[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
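# A small worked check of the unbiased estimator (not part of the original file): with
# n=5 samples of which c=2 pass, pass@1 = 1 - C(3,1)/C(5,1) = 1 - 3/5 = 0.4, i.e. exactly
# the fraction of passing samples.
if __name__ == "__main__":
    assert abs(estimate_pass_at_k([5], [2], 1)[0] - 0.4) < 1e-12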
| 328 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
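# A minimal usage sketch (assumption, not part of the original file):
#
#     dl_manager = DownloadManager(dataset_name="my_dataset")
#     local_path = dl_manager.download_and_extract("https://example.com/data.zip")
#
# download_and_extract() chains the download() and extract() steps exercised above.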
| 299 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase_ ( unittest.TestCase ):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_decoder()
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
    def test_save_load_pretrained_additional_features(self):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
    def test_load_decoder_tokenizer_mismatch_content(self):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
    def test_feature_extractor(self):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) )
UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' )
UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : str = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Union[str, Any] = '''This is a test string'''
UpperCAmelCase__ : Optional[int] = processor(text=_A )
UpperCAmelCase__ : List[str] = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)
        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])
        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_feature_extractor()
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : str = self._get_dummy_logits()
UpperCAmelCase__ : Optional[int] = 15
UpperCAmelCase__ : Dict = -2_0.0
UpperCAmelCase__ : Optional[Any] = -4.0
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : List[Any] = decoded_processor_out.text
UpperCAmelCase__ : List[str] = list(_A )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) )
    def test_decoder_with_params_of_lm(self):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_decoder()
UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[int] = self._get_dummy_logits()
UpperCAmelCase__ : List[str] = 2.0
UpperCAmelCase__ : Union[str, Any] = 5.0
UpperCAmelCase__ : str = -2_0.0
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text
UpperCAmelCase__ : Tuple = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch(
_A , _A , )
UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )
UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , _A )
    def test_decoder_download_ignores_files(self):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Dict = os.listdir(_A )
UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
    def test_decoder_local_files(self):
'''simple docstring'''
UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A )
UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : List[str] = os.listdir(_A )
UpperCAmelCase__ : Any = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
    def test_processor_from_auto_processor(self):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' )
UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()
UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A )
UpperCAmelCase__ : int = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
    def test_model_input_names(self):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_word_offsets(self):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : str = self._get_dummy_logits()[0]
UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
    def test_word_offsets_batch(self):
'''simple docstring'''
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = self._get_dummy_logits()
UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration(self):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : List[Any] = iter(_A )
UpperCAmelCase__ : Optional[Any] = next(_A )
UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy()
UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A )
UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : Any = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A )
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text )
# output times
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) )
UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) )
# fmt: off
UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
| 299 | 1 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches that can no longer reach (or already exceed) the target sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
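# A quick brute-force cross-check (not part of the original file): every subset found by
# the backtracking search must also be found by enumerating all combinations.
from itertools import combinations

brute_force = [list(c) for r in range(len(nums) + 1) for c in combinations(nums, r) if sum(c) == max_sum]
assert sorted(map(sorted, result)) == sorted(map(sorted, brute_force))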
| 32 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Take items in order of decreasing value-to-weight ratio.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
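# A minimal usage sketch (not part of the original file): the classic instance with values
# [60, 100, 120], weights [10, 20, 30] and capacity 50 yields the greedy optimum of 240.
if __name__ == "__main__":
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 2 / 3]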
| 143 | 0 |
def neville_interpolate(x_points: list, y_points: list, xa: float) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
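# A small worked check (not part of the original file): for the linear data y = 2x the
# interpolated value at x = 2.5 must be exactly 5.0.
if __name__ == "__main__":
    value, _table = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 2.5)
    assert abs(value - 5.0) < 1e-9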
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 325 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    """Sieve of Eratosthenes: returns the prime numbers below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Counts hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
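# Worked example (from the Project Euler 800 statement, not part of the original file):
# solution(base=800, degree=1) counts hybrid integers p**q * q**p <= 800 and returns 2,
# namely 2**3 * 3**2 = 72 and 2**5 * 5**2 = 800.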
if __name__ == "__main__":
    print(f"{solution() = }")
| 325 | 1 |
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        # Predictor step (explicit Euler), then trapezoidal corrector.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
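# A minimal usage sketch (not part of the original file): integrating y' = y with y(0) = 1
# over [0, 1]; the predictor-corrector result should land close to e = 2.71828...
if __name__ == "__main__":
    ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    assert abs(ys[-1] - 2.718281828) < 1e-3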
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : List[str] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
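# Worked example (not part of the original file): a box (10, 20, 30, 40) on a 100x200
# image maps into the 0-1000 coordinate space as [100, 100, 300, 200].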
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
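# A minimal usage sketch (assumption, not part of the original file; the class above mirrors
# LayoutLMv3's image processor, obfuscated here to the name `A`):
#
#     image_processor = A(apply_ocr=True)
#     encoding = image_processor(image, return_tensors="pt")
#     # encoding holds "pixel_values" plus OCR "words" and "boxes" for a LayoutLM-style tokenizer.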
| 204 | 1 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given string to little-endian in groups of 8 chars."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to a little-endian hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string and pads it to a multiple of 512 chars."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-char blocks and yields each as a list of 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Flips the bits of the given 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Adds two numbers as 32-bit ints."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of a given int left by a given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char MD5 hash of a given message."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67_45_23_01
    b0 = 0xEF_CD_AB_89
    c0 = 0x98_BA_DC_FE
    d0 = 0x10_32_54_76

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
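# A quick cross-check against the standard library (not part of the original file).
if __name__ == "__main__":
    import hashlib

    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")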
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
'''simple docstring'''
import os
def solution() -> str:
    """Returns the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 337 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowercase : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(_lowerCAmelCase )
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : Union[str, Any] , **_lowercase : Optional[Any] ):
super().__init__(**_lowercase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Union[str, Any] , _lowercase : Union[str, List[str], "Image", List["Image"]] , **_lowercase : Tuple ):
return super().__call__(_lowercase , **_lowercase )
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
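# A minimal usage sketch (assumption, not part of the original file):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cats.png", candidate_labels=["a photo of a cat", "a photo of a dog"])
#
# Each candidate label is formatted with hypothesis_template and scored against the image.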
| 332 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
def a ( self : List[str] ):
__UpperCAmelCase = '''sgugger/tiny-distilbert-classification'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : int ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
# set architectures equal to `None`
__UpperCAmelCase = None
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Tuple ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Any ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tinier_bart'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : int ):
__UpperCAmelCase = '''sshleifer/tinier_bart'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() )
def a ( self : List[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowercase : str ):
self.assertTrue(hasattr(_lowercase , '''sequential''' ) )
self.assertTrue(hasattr(_lowercase , '''cumulative''' ) )
self.assertTrue(hasattr(_lowercase , '''current''' ) )
self.assertTrue(hasattr(_lowercase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
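# Standalone sketch (illustrative, outside the unittest harness): the same public
# API the tests above exercise can be driven directly, e.g.
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)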
| 332 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_autoformer""": [
        """AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """AutoformerConfig""",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        """AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """AutoformerForPrediction""",
        """AutoformerModel""",
        """AutoformerPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 365 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason='''Bit does not output attentions''' )
    def test_attention_outputs( self ):
        pass
    @unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
        pass
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
| 4 | 0 |
def interpolation_search( sorted_collection , item ):
    '''Search ``item`` in an ascending ``sorted_collection``; return its index or None.'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion( sorted_collection , item , left , right ):
    '''Recursive variant: searches ``item`` between indices ``left`` and ``right``.'''
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted( collection ):
    '''Raise ValueError if ``collection`` is not ascending sorted.'''
    if collection != sorted(collection ):
        raise ValueError('Collection must be ascending sorted' )
    return True
if __name__ == "__main__":
    import sys
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(F'{target} found at positions: {result}')
    else:
        print('''Not found''')
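# Worked example of the probe formula on [10, 30, 40, 45, 50, 66, 77, 93] with item = 66:
#   point = 0 + ((66 - 10) * (7 - 0)) // (93 - 10) = (56 * 7) // 83 = 392 // 83 = 4
# sorted_collection[4] == 50 < 66, so the next iteration narrows the window to left = point + 1 = 5.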
| 295 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module ):
    def __init__(self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward(self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class PreForwardHook(ModelHook ):
    def pre_forward(self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook ):
    def post_forward(self , module , output ):
        return output + 1
class HooksModelTester(unittest.TestCase ):
    def test_add_and_remove_hooks(self ):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model , test_hook )
        self.assertEqual(test_model._hf_hook , test_hook )
        self.assertTrue(hasattr(test_model , '_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , 'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
        remove_hook_from_module(test_model )
        self.assertFalse(hasattr(test_model , '_hf_hook' ) )
        self.assertFalse(hasattr(test_model , '_old_forward' ) )
    def test_append_and_remove_hooks(self ):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model , test_hook )
        add_hook_to_module(test_model , test_hook , append=True )
        self.assertEqual(isinstance(test_model._hf_hook , SequentialHook ) , True )
        self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
        self.assertTrue(hasattr(test_model , '_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , 'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
        remove_hook_from_module(test_model )
        self.assertFalse(hasattr(test_model , '_hf_hook' ) )
        self.assertFalse(hasattr(test_model , '_old_forward' ) )
    def test_pre_forward_hook_is_executed(self ):
        test_model = ModelForTest()
        x = torch.randn(2 , 3 )
        expected = test_model(x + 1 )
        expected2 = test_model(x + 2 )
        test_hook = PreForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , expected , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , expected , atol=1E-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook() , PreForwardHook() )
        add_hook_to_module(test_model , test_hook )
        output2 = test_model(x )
        assert torch.allclose(output2 , expected2 , atol=1E-5 )
    def test_post_forward_hook_is_executed(self ):
        test_model = ModelForTest()
        x = torch.randn(2 , 3 )
        output = test_model(x )
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , output + 1 , atol=1E-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook() , PostForwardHook() )
        add_hook_to_module(test_model , test_hook )
        output2 = test_model(x )
        assert torch.allclose(output2 , output + 2 , atol=1E-5 )
    def test_no_grad_in_hook(self ):
        test_model = ModelForTest()
        x = torch.randn(2 , 3 )
        output = test_model(x )
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1 , output + 1 ) )
        self.assertTrue(output1.requires_grad )
        test_hook.no_grad = True
        output1 = test_model(x )
        self.assertFalse(output1.requires_grad )
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1 , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.linear1.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.linear2.weight.device , torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model , AlignDevicesHook(io_same_device=True ) )
        x = torch.randn(2 , 3 ).to(0 )
        output = model(x )
        self.assertEqual(output.device , torch.device(0 ) )
    def test_align_devices_as_cpu_offload(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # This will move each submodule on different devices
        hook_kwargs = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs['execution_device'] )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # Now test with buffers included in the offload
        hook_kwargs = {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
    def test_attach_align_device_hook_as_cpu_offload(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(model , execution_device=execution_device , offload=True )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(model , execution_device=execution_device , offload=True , offload_buffers=True )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() , offload_buffers=True , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('cpu' ) )
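# Illustrative sketch of a custom hook (an assumption built on the same ModelHook
# API exercised above: pre_forward returns (args, kwargs), post_forward returns output).
class ShapeLoggingHook(ModelHook ):
    def pre_forward(self , module , *args , **kwargs ):
        # Log the shapes of tensor positional arguments before the wrapped forward runs.
        print([tuple(a.shape ) for a in args if hasattr(a , 'shape' )] )
        return args, kwargs
    def post_forward(self , module , output ):
        # Pass the output through unchanged.
        return output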
| 295 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 319 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    """configuration_encodec""": [
        """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """EncodecConfig""",
    ],
    """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """EncodecModel""",
        """EncodecPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319 | 1 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
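# Side note (illustrative): channels_last reorders strides, not the logical shape,
# which is what lets channels-last-aware conv kernels run faster. For example:
#   t = torch.randn(1, 3, 8, 8).to(memory_format=torch.channels_last)
#   t.shape    # torch.Size([1, 3, 8, 8])
#   t.stride() # (192, 1, 24, 3)  -> NHWC memory layout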
| 254 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
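# Minimal usage sketch (illustrative; numpy is already required by the imports above):
#   import numpy as np
#   processor = CLIPImageProcessor(size={"shortest_edge": 224})
#   dummy = np.zeros((256, 256, 3), dtype=np.uint8)
#   batch = processor.preprocess(dummy)
#   # batch["pixel_values"][0].shape == (3, 224, 224)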
| 250 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 104 |
'''simple docstring'''
from __future__ import annotations
def _A (n: int ) -> list[int]:
    '''Return the prime factors of ``n`` in ascending order.'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
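# Worked example: 360 = 2**3 * 3**2 * 5, so _A(360) returns [2, 2, 2, 3, 3, 5].
# Each prime divisor is divided out repeatedly before i advances, which is why
# repeated factors appear in ascending order.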
| 104 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
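# Note on the pattern above (illustrative): routing the module through _LazyModule
# via sys.modules defers the heavy torch imports until first attribute access, e.g.
#   import transformers.models.bloom as bloom   # cheap
#   bloom.BloomConfig                            # triggers the configuration import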
| 299 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel as Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@register_to_config
def __init__( self , _A , _A = None , _A = None ) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE_ = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
SCREAMING_SNAKE_CASE_ = torch.zeros(_A , _A )
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(_A )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
def __init__( self , _A , _A , _A , _A , _A , _A , ) -> Any:
super().__init__()
self.register_modules(
vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )
def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = len(_A ) if isinstance(_A , _A ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE_ = self.tokenizer(
_A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
SCREAMING_SNAKE_CASE_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE_ = prompt_embeds.repeat_interleave(_A , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
SCREAMING_SNAKE_CASE_ = self.learned_classifier_free_sampling_embeddings.embeddings
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
else:
SCREAMING_SNAKE_CASE_ = [''''''] * batch_size
SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE_ = self.tokenizer(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.repeat(1 , _A , 1 )
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _A , _A = 100 , _A = 5.0 , _A = 1.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = 1
elif isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = len(_A )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
SCREAMING_SNAKE_CASE_ = self._encode_prompt(_A , _A , _A )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_A )}.''' )
# get the initial completely masked latents unless the user supplied it
SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
SCREAMING_SNAKE_CASE_ = self.transformer.num_vector_embeds - 1
SCREAMING_SNAKE_CASE_ = torch.full(_A , _A ).to(self.device )
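            # Start fully masked: every latent pixel is initialized to the special mask
            # class (index num_vector_embeds - 1) that VQ-Diffusion progressively denoises.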
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
SCREAMING_SNAKE_CASE_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A , device=self.device )
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE_ = latents
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the sample if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
SCREAMING_SNAKE_CASE_ = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_output.chunk(2 )
SCREAMING_SNAKE_CASE_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
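                # Classifier-free guidance: move the unconditional log-probabilities toward
                # the text-conditioned ones by `guidance_scale`, then renormalize via logsumexp.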
model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
SCREAMING_SNAKE_CASE_ = self.truncate(_A , _A )
# remove `log(0)`'s (`-inf`s)
SCREAMING_SNAKE_CASE_ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
SCREAMING_SNAKE_CASE_ = self.vqvae.config.vq_embed_dim
SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
SCREAMING_SNAKE_CASE_ = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
SCREAMING_SNAKE_CASE_ = self.vqvae.decode(_A , force_not_quantize=_A ).sample
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
def _UpperCamelCase ( self , _A , _A ) -> torch.FloatTensor:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.sort(_A , 1 , descending=_A )
SCREAMING_SNAKE_CASE_ = torch.exp(_A )
SCREAMING_SNAKE_CASE_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
SCREAMING_SNAKE_CASE_ = torch.full_like(keep_mask[:, 0:1, :] , _A )
SCREAMING_SNAKE_CASE_ = torch.cat((all_true, keep_mask) , dim=1 )
SCREAMING_SNAKE_CASE_ = keep_mask[:, :-1, :]
SCREAMING_SNAKE_CASE_ = keep_mask.gather(1 , indices.argsort(1 ) )
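        # This keeps the smallest set of classes whose cumulative probability reaches
        # `truncation_rate` (a top-p filter); gathering by indices.argsort(1) restores
        # the keep mask to original vocabulary order before masking below.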
SCREAMING_SNAKE_CASE_ = log_p_x_0.clone()
SCREAMING_SNAKE_CASE_ = -torch.inf # -inf = log(0)
return rv
| 299 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
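    # subset[i][j] is True when some subset of the first i array elements sums to exactly j.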
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__UpperCamelCase =True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__UpperCamelCase =False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase =subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _UpperCAmelCase ( ):
print('Making key files...' )
make_key_files('rsa' , 10_24 )
print('Key files generation successful.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
print('Generating prime p...' )
__UpperCamelCase =rabinMiller.generate_large_prime(SCREAMING_SNAKE_CASE__ )
print('Generating prime q...' )
__UpperCamelCase =rabinMiller.generate_large_prime(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
__UpperCamelCase =random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(SCREAMING_SNAKE_CASE__ , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
__UpperCamelCase =cryptoMath.find_mod_inverse(SCREAMING_SNAKE_CASE__ , (p - 1) * (q - 1) )
__UpperCamelCase =(n, e)
__UpperCamelCase =(n, d)
return (public_key, private_key)
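# The pair satisfies e * d ≡ 1 (mod (p - 1) * (q - 1)), so for any message m < n,
# pow(pow(m, e, n), d, n) == m -- the standard RSA round trip.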
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
print('\nWARNING:' )
print(
F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__UpperCamelCase , __UpperCamelCase =generate_key(SCREAMING_SNAKE_CASE__ )
print(F'\nWriting public key to file {name}_pubkey.txt...' )
with open(F'{name}_pubkey.txt' , 'w' ) as out_file:
out_file.write(F'{key_size},{public_key[0]},{public_key[1]}' )
print(F'Writing private key to file {name}_privkey.txt...' )
with open(F'{name}_privkey.txt' , 'w' ) as out_file:
out_file.write(F'{key_size},{private_key[0]},{private_key[1]}' )
if __name__ == "__main__":
main()
| 117 | 0 |
def lowercase_ (A : int = 1_0_0 ):
snake_case__ : str = 0
snake_case__ : Tuple = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
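# Closed forms avoid the loop entirely (a sketch, not in the original):
# sum_of_ints = n * (n + 1) // 2 and sum_of_squares = n * (n + 1) * (2 * n + 1) // 6.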
if __name__ == "__main__":
print(F"""{solution() = }""")
| 277 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : Tuple = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = 'xlnet'
_snake_case = ['mems']
_snake_case = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , SCREAMING_SNAKE_CASE_=32000 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="bi" , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=-1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="last" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="tanh" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , )-> List[str]:
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
__UpperCamelCase = d_model // n_head
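        # Each attention head operates on d_model // n_head dimensions, which is why the
        # divisibility checks above reject inconsistent `d_head` overrides.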
__UpperCamelCase = ff_activation
__UpperCamelCase = d_inner
__UpperCamelCase = untie_r
__UpperCamelCase = attn_type
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = dropout
__UpperCamelCase = mem_len
__UpperCamelCase = reuse_len
__UpperCamelCase = bi_data
__UpperCamelCase = clamp_len
__UpperCamelCase = same_length
__UpperCamelCase = summary_type
__UpperCamelCase = summary_use_proj
__UpperCamelCase = summary_activation
__UpperCamelCase = summary_last_dropout
__UpperCamelCase = start_n_top
__UpperCamelCase = end_n_top
__UpperCamelCase = bos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , SCREAMING_SNAKE_CASE_ , )
__UpperCamelCase = kwargs['''use_cache''']
__UpperCamelCase = use_mems_eval
__UpperCamelCase = use_mems_train
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
    @A__.setter
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]:
'''simple docstring'''
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 328 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
_a = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : Optional[int], UpperCamelCase_ : Tuple, UpperCamelCase_ : int, UpperCamelCase_ : List[Any]) -> List[str]:
'''simple docstring'''
for attribute in key.split("."):
__lowercase = getattr(UpperCamelCase_, UpperCamelCase_)
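        # Walk the dotted path (e.g. "encoder.layers.0.attention.k_proj") one attribute
        # at a time until the pointer reaches the target submodule or tensor.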
if weight_type is not None:
__lowercase = getattr(UpperCamelCase_, UpperCamelCase_).shape
else:
__lowercase = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
else:
__lowercase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : str) -> Dict:
'''simple docstring'''
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.feature_extractor
__lowercase = hf_model.adapter
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, hf_model.config.feat_extract_norm == "group", )
__lowercase = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
load_adapter(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(UpperCamelCase_)[0].split(".")[-2]
__lowercase = mapped_key.replace("*", UpperCamelCase_)
if "weight_g" in name:
__lowercase = "weight_g"
elif "weight_v" in name:
__lowercase = "weight_v"
elif "bias" in name:
__lowercase = "bias"
elif "weight" in name:
__lowercase = "weight"
else:
__lowercase = None
set_recursively(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
continue
if not is_used:
unused_weights.append(UpperCamelCase_)
logger.warning(F"""Unused weights: {unused_weights}""")
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Tuple, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : str, UpperCamelCase_ : str) -> Optional[Any]:
'''simple docstring'''
__lowercase = full_name.split("conv_layers.")[-1]
__lowercase = name.split(".")
__lowercase = int(items[0])
__lowercase = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowercase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
else:
unused_weights.append(UpperCamelCase_)
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : List[str], UpperCamelCase_ : Tuple, UpperCamelCase_ : int) -> str:
'''simple docstring'''
__lowercase = full_name.split("adaptor.")[-1]
__lowercase = name.split(".")
if items[1].isdigit():
__lowercase = int(items[1])
else:
__lowercase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
__lowercase = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""")
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
__lowercase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
__lowercase = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""")
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
__lowercase = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""")
elif isinstance(UpperCamelCase_, UpperCamelCase_):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
__lowercase = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""")
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
__lowercase = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""")
else:
unused_weights.append(UpperCamelCase_)
def _A ( UpperCamelCase_ : Tuple) -> Optional[int]:
'''simple docstring'''
__lowercase ,__lowercase = emb.weight.shape
__lowercase = nn.Linear(UpperCamelCase_, UpperCamelCase_, bias=UpperCamelCase_)
__lowercase = emb.weight.data
return lin_layer
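# Weight tying: the LM head reuses the embedding matrix as its projection weights
# (assigned via .data, which bypasses the usual shape check), so no bias is needed.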
@torch.no_grad()
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Any, UpperCamelCase_ : str, UpperCamelCase_ : int, UpperCamelCase_ : List[Any], UpperCamelCase_ : List[str], UpperCamelCase_ : int, UpperCamelCase_ : Dict, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : str, ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaConfig.from_pretrained(
UpperCamelCase_, add_adapter=UpperCamelCase_, adapter_stride=UpperCamelCase_, adapter_kernel_size=UpperCamelCase_, use_auth_token=UpperCamelCase_, output_hidden_size=UpperCamelCase_, )
__lowercase = MBartConfig.from_pretrained(UpperCamelCase_)
# load model
__lowercase ,__lowercase ,__lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/")[:-1]),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
}, )
__lowercase = model[0].eval()
# load feature extractor
__lowercase = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_, use_auth_token=UpperCamelCase_)
# set weights for wav2vec2 encoder
__lowercase = WavaVecaModel(UpperCamelCase_)
recursively_load_weights_wavaveca(model.encoder, UpperCamelCase_)
# load decoder weights
__lowercase = MBartForCausalLM(UpperCamelCase_)
__lowercase ,__lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=UpperCamelCase_)
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""")
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
__lowercase = SpeechEncoderDecoderModel(encoder=UpperCamelCase_, decoder=UpperCamelCase_)
__lowercase = False
__lowercase = MBartaaTokenizer(UpperCamelCase_)
tokenizer.save_pretrained(UpperCamelCase_)
__lowercase = hf_wavavec.config.to_dict()
__lowercase = tokenizer.pad_token_id
__lowercase = tokenizer.bos_token_id
__lowercase = tokenizer.eos_token_id
__lowercase = "mbart50"
__lowercase = "wav2vec2"
__lowercase = tokenizer.eos_token_id
__lowercase = 250004
__lowercase = tokenizer.eos_token_id
__lowercase = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase_)
hf_wavavec.save_pretrained(UpperCamelCase_)
feature_extractor.save_pretrained(UpperCamelCase_)
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=10_24, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_00_04, type=int, help='`decoder_start_token_id` of model config')
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 144 |
"""simple docstring"""
import argparse
import os
import re
_a = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_a = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : bool = False) -> Optional[Any]:
'''simple docstring'''
with open(UpperCamelCase_, "r", encoding="utf-8") as f:
__lowercase = f.read()
__lowercase = content.split("\n")
__lowercase = []
__lowercase = 0
while line_idx < len(UpperCamelCase_):
if _re_intro_mapping.search(lines[line_idx]) is not None:
__lowercase = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "("):
new_lines.append(lines[line_idx])
line_idx += 1
__lowercase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__lowercase = line_idx
while not lines[line_idx].startswith(" " * indent + ")"):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1]))
else:
blocks.append(lines[line_idx])
line_idx += 1
# Sort blocks by their identifiers
__lowercase = sorted(UpperCamelCase_, key=lambda UpperCamelCase_: _re_identifier.search(UpperCamelCase_).groups()[0])
new_lines += blocks
else:
new_lines.append(lines[line_idx])
line_idx += 1
if overwrite:
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write("\n".join(UpperCamelCase_))
elif "\n".join(UpperCamelCase_) != content:
return True
def _A ( UpperCamelCase_ : bool = False) -> Optional[Any]:
'''simple docstring'''
__lowercase = [os.path.join(UpperCamelCase_, UpperCamelCase_) for f in os.listdir(UpperCamelCase_) if f.endswith(".py")]
__lowercase = [sort_auto_mapping(UpperCamelCase_, overwrite=UpperCamelCase_) for fname in fnames]
if not overwrite and any(UpperCamelCase_):
__lowercase = [f for f, d in zip(UpperCamelCase_, UpperCamelCase_) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(UpperCamelCase_)}. Run `make style` to fix"""
" this.")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_a = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 144 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
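# The three OfflineSimulationMode values exercised below emulate, respectively: requests
# that would hang until a timeout, connections that fail outright, and HF_DATASETS_OFFLINE=1.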
@pytest.mark.integration
def _lowercase ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowercase__ ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def _lowercase ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def _lowercase ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowercase__ ):
http_head('''https://huggingface.co''' )
| 275 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
def constraint_to_multiple_of(lowercase__ , lowercase__ , lowercase__=0 , lowercase__=None ):
__lowerCAmelCase : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple
if x < min_val:
__lowerCAmelCase : Any = math.ceil(val / multiple ) * multiple
return x
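    # Illustrative example (not part of the original): with multiple=32,
    # constraint_to_multiple_of(253, 32) rounds 253 to 256, its nearest multiple of 32.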
__lowerCAmelCase : Dict = (output_size, output_size) if isinstance(lowercase__ , lowercase__ ) else output_size
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = get_image_size(lowercase__ )
__lowerCAmelCase, __lowerCAmelCase : int = output_size
# determine new height and width
__lowerCAmelCase : Optional[Any] = output_height / input_height
__lowerCAmelCase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__lowerCAmelCase : str = scale_width
else:
# fit height
__lowerCAmelCase : str = scale_height
__lowerCAmelCase : Any = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase__ )
__lowerCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase__ )
return (new_height, new_width)
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = ["""pixel_values"""]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = False , A_ = 1 , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) ->None:
'''simple docstring'''
super().__init__(**A_ )
__lowerCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384}
__lowerCAmelCase : Dict = get_size_dict(A_ )
__lowerCAmelCase : Optional[Any] = do_resize
__lowerCAmelCase : int = size
__lowerCAmelCase : Dict = keep_aspect_ratio
__lowerCAmelCase : List[Any] = ensure_multiple_of
__lowerCAmelCase : Tuple = resample
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Any = rescale_factor
__lowerCAmelCase : List[Any] = do_normalize
__lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , A_ , A_ , A_ = False , A_ = 1 , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) ->np.ndarray:
'''simple docstring'''
__lowerCAmelCase : int = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(
A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ = None , **A_ , ) ->Dict:
'''simple docstring'''
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ = None , **A_ , ) ->np.ndarray:
'''simple docstring'''
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) ->PIL.Image.Image:
'''simple docstring'''
__lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase : Optional[int] = size if size is not None else self.size
__lowerCAmelCase : Union[str, Any] = get_size_dict(A_ )
__lowerCAmelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__lowerCAmelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__lowerCAmelCase : Tuple = resample if resample is not None else self.resample
__lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
__lowerCAmelCase : Optional[Any] = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase : Any = [to_numpy_array(A_ ) for image in images]
if do_resize:
__lowerCAmelCase : Optional[Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_rescale:
__lowerCAmelCase : Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__lowerCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
__lowerCAmelCase : Dict = {'''pixel_values''': images}
return BatchFeature(data=A_ , tensor_type=A_ )
def UpperCamelCase__ ( self , A_ , A_ = None ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(A_ ):
__lowerCAmelCase : Optional[int] = target_sizes.numpy()
__lowerCAmelCase : List[str] = []
for idx in range(len(A_ ) ):
__lowerCAmelCase : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
__lowerCAmelCase : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
__lowerCAmelCase : Any = logits.argmax(dim=1 )
__lowerCAmelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 275 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case = r'\w+[.]\d+'
snake_case = re.findall(_UpperCamelCase , _UpperCamelCase )
for pat in pats:
snake_case = key.replace(_UpperCamelCase , '_'.join(pat.split('.' ) ) )
return key
def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : int ) -> str:
"""simple docstring"""
snake_case = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
snake_case = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
snake_case = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
snake_case = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
snake_case = pt_tensor.transpose(2 , 3 , 1 , 0 )
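        # PyTorch conv kernels are (out_ch, in_ch, kH, kW); Flax expects (kH, kW, in_ch, out_ch),
        # hence the (2, 3, 1, 0) transpose. Plain linear weights only need the .T below.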
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
snake_case = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Dict=4_2 ) -> Dict:
"""simple docstring"""
snake_case = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
snake_case = flax_model.init_weights(PRNGKey(_UpperCamelCase ) )
snake_case = flatten_dict(_UpperCamelCase )
snake_case = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case = rename_key(_UpperCamelCase )
snake_case = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
snake_case ,snake_case = rename_key_and_reshape_tensor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
snake_case = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
| 365 | """simple docstring"""
import requests
from bsa import BeautifulSoup
def lowerCAmelCase__ ( _UpperCamelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
"""simple docstring"""
snake_case = BeautifulSoup(requests.get(_UpperCamelCase ).text , 'html.parser' )
snake_case = soup.findAll('h1' )
snake_case = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_UpperCamelCase , _UpperCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 149 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__snake_case = logging.get_logger(__name__)
class lowercase__ ( snake_case__ ):
A__ : int =["pixel_values"]
def __init__( self : int , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : Any , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = size if size is not None else {"shortest_edge": 256}
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_center_crop
SCREAMING_SNAKE_CASE__ = crop_size
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Tuple , ):
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE__ = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_ )
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : str , ):
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ )
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Any , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any ):
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : int , ):
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Optional[int] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : Dict , ):
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE__ = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
| 176 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = """โ"""
__SCREAMING_SNAKE_CASE : str = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE : int = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
__SCREAMING_SNAKE_CASE : str = {
"""google/pegasus-xsum""": 512,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase: Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase: List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase: Optional[int] = PegasusTokenizer
__UpperCamelCase: Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Dict , A : List[str]=None , A : Union[str, Any]=None , A : Optional[int]="<pad>" , A : Tuple="</s>" , A : Union[str, Any]="<unk>" , A : Union[str, Any]="<mask_2>" , A : Dict="<mask_1>" , A : Union[str, Any]=None , A : int=103 , **A : Optional[Any] , ):
_UpperCAmelCase : Dict = offset
if additional_special_tokens is not None:
if not isinstance(A , A ):
raise TypeError(
F"""additional_special_tokens should be of type {type(A )}, but is"""
F""" {type(A )}""" )
_UpperCAmelCase : Optional[int] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(A ) , self.offset - 1 )
]
if len(set(A ) ) != len(A ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
_UpperCAmelCase : Any = additional_special_tokens_extended
else:
_UpperCAmelCase : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
A , tokenizer_file=A , pad_token=A , eos_token=A , unk_token=A , mask_token=A , mask_token_sent=A , offset=A , additional_special_tokens=A , **A , )
_UpperCAmelCase : Optional[Any] = vocab_file
_UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
def _A ( self : List[str] , A : Optional[Any] ):
_UpperCAmelCase : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def _A ( self : str , A : List , A : Optional[List] = None , A : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(A )
elif token_ids_a is None:
return self._special_token_mask(A ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _A ( self : Optional[int] , A : Union[str, Any] , A : int=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _A ( self : Union[str, Any] , A : str , A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 31 | 0 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__A ={
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(UpperCamelCase__ ) , version.parse(UpperCamelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None ):
UpperCAmelCase__ : Any = f'''\n{hint}''' if hint is not None else """"""
# non-versioned check
if re.match(R"""^[\w_\-\d]+$""" , UpperCamelCase__ ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = requirement, None, None
else:
UpperCAmelCase__ : Optional[int] = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , UpperCamelCase__ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
f''' got {requirement}''' )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = match[0]
UpperCAmelCase__ : List[Any] = want_full.split(""",""" ) # there could be multiple requirements
UpperCAmelCase__ : Any = {}
for w in want_range:
UpperCAmelCase__ : Tuple = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , UpperCamelCase__ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
f''' but got {requirement}''' )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = match[0]
UpperCAmelCase__ : Optional[Any] = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
UpperCAmelCase__ : Any = """.""".join([str(UpperCamelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return
# check if any version is installed
try:
UpperCAmelCase__ : Dict = importlib.metadata.version(UpperCamelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Tuple = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(UpperCamelCase__ , UpperCamelCase__ ) | 283 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A ='\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__A ='\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
__A ='\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="exp" , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ):
UpperCAmelCase__ : int = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
UpperCAmelCase__ : int = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
UpperCAmelCase__ : int = sacrebleu.corpus_bleu(
UpperCamelCase__ , UpperCamelCase__ , smooth_method=UpperCamelCase__ , smooth_value=UpperCamelCase__ , force=UpperCamelCase__ , lowercase=UpperCamelCase__ , use_effective_order=UpperCamelCase__ , )
return output.score
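# Hedged usage sketch for the standalone helpers above (not part of the original
# file): the sample sentences and expected values come from the metric docstring;
# exact scores can vary slightly with the installed sacrebleu version.
#   sources = ["About 95 species are currently accepted ."]
#   predictions = ["About 95 you now get in ."]
#   references = [["About 95 species are currently known ."]]
#   compute_sari(sources, predictions, references)    # ~21.81
#   compute_sacrebleu(predictions, references)        # ~14.54
#   compute_em(predictions, references)               # 0.0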
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result | 283 | 1 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)
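# Quick sanity checks (hedged sketch, mirroring the truth table printed by main()):
# NOR is true only when both inputs are 0.
#   nor_gate(0, 0) -> 1
#   nor_gate(0, 1) -> 0
#   nor_gate(1, 0) -> 0
#   nor_gate(1, 1) -> 0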
def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 303 | 0 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental; a UserWarning is emitted on each call."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
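# Hedged usage sketch (the function name below is illustrative, not from the original file):
#   @experimental
#   def new_feature():
#       return 42
#   new_feature()  # warns: "'new_feature' is experimental and might be subject to breaking changes in the future."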
| 355 |
"""simple docstring"""
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
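# Worked example: 145 is a Krishnamurthy number because the factorials of its
# digits sum back to itself: 1! + 4! + 5! = 1 + 24 + 120 = 145, so
# krishnamurthy(145) is True, while krishnamurthy(144) is False (1 + 24 + 24 = 49).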
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
lowerCAmelCase__ = int(input('''Enter number: ''').strip())
print(
F"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
)
| 175 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
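# Hedged sketch of expected behaviour on a small input (values follow from the
# Doolittle scheme above; only valid when no zero pivot is encountered):
#   table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#   lower, upper = lower_upper_decomposition(table)
#   np.allclose(lower @ upper, table)  # -> True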
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
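# Hedged examples, consistent with the even/odd handling above:
#   median_of_two_arrays([1, 3], [2])     -> 2      (odd total length: middle element)
#   median_of_two_arrays([1, 2], [3, 4])  -> 2.5    (even: mean of the two middle values)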
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Any = [float(x) for x in input('Enter the elements of first array: ').split()]
A : Union[str, Any] = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
| 305 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
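# Hedged wiring sketch (trainer construction is assumed, not part of this file):
#   checkpoint = get_checkpoint_callback(output_dir="out", metric="rouge2")
#   es = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[checkpoint, es, Seq2SeqLoggingCallback()])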
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 332 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
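# Derivation note (added for clarity): an a x b grid contains T(a) * T(b)
# rectangles, where T(n) = n(n+1)/2 is the n-th triangle number. For a fixed
# triangle_a = T(a), setting T(b) = target / triangle_a gives
# b^2 + b - 2 * target / triangle_a = 0, whose positive root is exactly the
# b_estimate computed above: (-1 + sqrt(1 + 8 * target / triangle_a)) / 2.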
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids) | 308 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload is a decodable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
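# Hedged CLI sketch (the script filename and prompt are illustrative, not from the original file):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg --num_class_images 200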
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 308 | 1 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive approach: check every 3-permutation for the target sum."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized approach: sort, then scan with two pointers."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
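# Worked example: with arr = [1, 2, 4, 8] and target = 13, the sorted two-pointer
# scan fixes i at 1, moves left from 2 to 4 (since 1 + 2 + 8 < 13), and finds
# 1 + 4 + 8 = 13, so triplet_sum2 returns (1, 4, 8); triplet_sum1 reaches the
# same tuple by brute force over permutations.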
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 311 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        *args,
    ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 311 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids must be a list of lists of integers, and it is initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # Constraints that are complete subsets of another are rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 42 | import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
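        # Editorial note (not in the original test): multiplying the mean token loss by the
        # target length recovers the summed negative log-likelihood, so EXPECTED_SCORE is the
        # sequence log-probability; `mtf_score` presumably mirrors the Mesh-TensorFlow score.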
| 348 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Tuple = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
__snake_case : Optional[Any] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['input_ids', 'attention_mask']
__snake_case = RobertaTokenizer
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Any="replace" , lowerCAmelCase_ : Union[str, Any]="<s>" , lowerCAmelCase_ : Tuple="</s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : Any="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Optional[int]="<pad>" , lowerCAmelCase_ : str="<mask>" , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : Tuple , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : Optional[Any] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_ ) != add_prefix_space:
A__ : Union[str, Any] =getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""" ) )
A__ : int =add_prefix_space
A__ : str =pre_tok_class(**lowerCAmelCase_ )
A__ : int =add_prefix_space
A__ : Optional[int] ="""post_processor"""
A__ : Any =getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ )
if tokenizer_component_instance:
A__ : Tuple =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A__ : Any =tuple(state["""sep"""] )
if "cls" in state:
A__ : str =tuple(state["""cls"""] )
A__ : Any =False
if state.get("""add_prefix_space""" , lowerCAmelCase_ ) != add_prefix_space:
A__ : Union[str, Any] =add_prefix_space
A__ : int =True
if state.get("""trim_offsets""" , lowerCAmelCase_ ) != trim_offsets:
A__ : Dict =trim_offsets
A__ : int =True
if changes_to_apply:
A__ : List[str] =getattr(lowerCAmelCase_ , state.pop("""type""" ) )
A__ : Any =component_class(**lowerCAmelCase_ )
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ )
@property
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self : int , lowerCAmelCase_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else value
A__ : int =value
def lowercase__ ( self : str , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> BatchEncoding:
'''simple docstring'''
A__ : int =kwargs.get("""is_split_into_words""" , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Optional[int] ) -> BatchEncoding:
'''simple docstring'''
A__ : Dict =kwargs.get("""is_split_into_words""" , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
A__ : Optional[int] =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowercase__ ( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple=None ) -> Dict:
'''simple docstring'''
A__ : Any =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : int , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Any =[self.sep_token_id]
A__ : Union[str, Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
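# Editorial note (not in the original file): the two methods above give this tokenizer
# RoBERTa's special-token layout. For hypothetical ids A = [10, 11] and B = [20]:
#   build_inputs_with_special_tokens(A)    -> [<s>, 10, 11, </s>]
#   build_inputs_with_special_tokens(A, B) -> [<s>, 10, 11, </s>, </s>, 20, </s>]
# and create_token_type_ids_from_sequences returns all zeros in both cases, since RoBERTa
# does not use token type ids.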
| 366 |
'''simple docstring'''
import torch
from torch import nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : str=False ) -> List[str]:
'''simple docstring'''
super().__init__()
A__ : Any =n_token
A__ : int =d_embed
A__ : Any =d_proj
A__ : Tuple =cutoffs + [n_token]
A__ : Optional[Any] =[0] + self.cutoffs
A__ : Dict =div_val
A__ : str =self.cutoffs[0]
A__ : Optional[Any] =len(self.cutoffs ) - 1
A__ : List[Any] =self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
A__ : Any =nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
A__ : str =nn.Parameter(torch.zeros(self.n_clusters ) )
A__ : Union[str, Any] =nn.ModuleList()
A__ : Optional[int] =nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
else:
self.out_projs.append(lowerCAmelCase_ )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
A__ , A__ : Optional[int] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ : Tuple =d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , r_idx - l_idx ) )
A__ : Optional[int] =keep_order
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
if proj is None:
A__ : Optional[int] =nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
A__ : Optional[int] =nn.functional.linear(lowerCAmelCase_ , proj.t().contiguous() )
A__ : Union[str, Any] =nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Dict=False ) -> Optional[int]:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
A__ : Optional[Any] =hidden[..., :-1, :].contiguous()
A__ : List[Any] =labels[..., 1:].contiguous()
A__ : Optional[int] =hidden.view(-1 , hidden.size(-1 ) )
A__ : str =labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
A__ : Optional[int] =hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
A__ : Optional[Any] =self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
A__ : Tuple =labels != -1_00
A__ : int =torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
A__ : Union[str, Any] =(
-nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
A__ : List[Any] =nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
A__ , A__ : Any =[], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
A__ , A__ : Optional[int] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ : int =self.out_layers[0].weight[l_idx:r_idx]
A__ : List[str] =self.out_layers[0].bias[l_idx:r_idx]
else:
A__ : List[str] =self.out_layers[i].weight
A__ : Union[str, Any] =self.out_layers[i].bias
if i == 0:
A__ : Tuple =torch.cat([weight_i, self.cluster_weight] , dim=0 )
A__ : List[str] =torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
A__ , A__ , A__ : Tuple =weights[0], biases[0], self.out_projs[0]
A__ : List[Any] =self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : int =nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
if labels is None:
A__ : Union[str, Any] =hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
A__ : Union[str, Any] =torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
A__ : Any =0
A__ : Tuple =[0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
A__ , A__ : Tuple =cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
A__ : Tuple =(labels >= l_idx) & (labels < r_idx)
A__ : Any =mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
A__ : int =labels.index_select(0 , lowerCAmelCase_ ) - l_idx
A__ : List[str] =head_logprob.index_select(0 , lowerCAmelCase_ )
A__ : str =hidden.index_select(0 , lowerCAmelCase_ )
else:
A__ : Optional[Any] =hidden
if i == 0:
if labels is not None:
A__ : Optional[Any] =head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
A__ : Union[str, Any] =head_logprob[:, : self.cutoffs[0]]
else:
A__ , A__ , A__ : Dict =weights[i], biases[i], self.out_projs[i]
A__ : List[Any] =self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : List[Any] =nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
A__ : Optional[Any] =self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
A__ : Union[str, Any] =head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
A__ : List[str] =head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
A__ : Tuple =logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if self.n_clusters == 0:
A__ : List[str] =self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
A__ , A__ : List[str] =[], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
A__ , A__ : int =self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ : List[str] =self.out_layers[0].weight[l_idx:r_idx]
A__ : List[Any] =self.out_layers[0].bias[l_idx:r_idx]
else:
A__ : Dict =self.out_layers[i].weight
A__ : Any =self.out_layers[i].bias
if i == 0:
A__ : List[str] =torch.cat([weight_i, self.cluster_weight] , dim=0 )
A__ : Tuple =torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
A__ , A__ , A__ : Optional[int] =weights[0], biases[0], self.out_projs[0]
A__ : Any =self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Dict =hidden.new_empty((head_logit.size(0 ), self.n_token) )
A__ : Dict =nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
A__ : Tuple =[0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
A__ , A__ : List[Any] =cutoff_values[i], cutoff_values[i + 1]
if i == 0:
A__ : Tuple =head_logprob[:, : self.cutoffs[0]]
else:
A__ , A__ , A__ : Any =weights[i], biases[i], self.out_projs[i]
A__ : Dict =self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : str =nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
A__ : str =head_logprob[:, -i] + tail_logprob_i
A__ : List[Any] =logprob_i
return out
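# Editorial note (not in the original file): this module is the projected adaptive
# log-softmax used by Transformer-XL (Grave et al.). Frequent "head" tokens get their
# log-probability directly; a rare token factorizes as log P(cluster) + log P(token | cluster),
# which is the head_logprob + tail_logprob_i sum computed in both methods above.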
| 136 | 0 |
from __future__ import annotations
from collections import namedtuple
def UpperCAmelCase_ ( voltage , current , power ) -> tuple:
    """simple docstring"""
    result = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
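# Illustrative usage (editorial; values derived from the branches above, not from shipped
# doctests). Exactly one of the three quantities must be 0, and that one is solved for:
#
# UpperCAmelCase_(voltage=0, current=2, power=4)  # -> result(name='voltage', value=2.0)
# UpperCAmelCase_(voltage=2, current=4, power=0)  # -> result(name='power', value=8.0)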
| 5 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def snake_case ( UpperCAmelCase )-> Dict:
"""simple docstring"""
__A = torch.exp(UpperCAmelCase )
__A = torch.sum(UpperCAmelCase , dim=1 ) # sum of exp(x_i)
__A = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(UpperCAmelCase ) - B / A
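# Clarifying check (editorial, not in the original file): the closed form above is the
# row-wise Shannon entropy of softmax(x). With p = softmax(x) and Z = sum(exp(x)),
#   -sum_i p_i * log(p_i) = log(Z) - E_p[x],
# which is exactly torch.log(A) - B / A as computed. An equivalent reference:
#   p = torch.softmax(x, dim=1)
#   reference = -(p * torch.log(p)).sum(dim=1)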
class UpperCamelCase__ ( nn.Module):
def __init__( self :Any , _A :int ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__A = config.output_attentions
__A = config.output_hidden_states
__A = nn.ModuleList([BertLayer(_A ) for _ in range(config.num_hidden_layers )] )
__A = nn.ModuleList([BertHighway(_A ) for _ in range(config.num_hidden_layers )] )
__A = [-1 for _ in range(config.num_hidden_layers )]
def lowercase_ ( self :Any , _A :List[Any] ) -> Tuple:
'''simple docstring'''
if (type(_A ) is float) or (type(_A ) is int):
for i in range(len(self.early_exit_entropy ) ):
__A = x
else:
__A = x
def lowercase_ ( self :Optional[Any] , _A :List[str] ) -> Dict:
'''simple docstring'''
__A = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowercase_ ( self :List[Any] , _A :Tuple , _A :Tuple=None , _A :int=None , _A :List[Any]=None , _A :str=None , ) -> Tuple:
'''simple docstring'''
__A = ()
__A = ()
__A = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = layer_module(
_A , _A , head_mask[i] , _A , _A )
__A = layer_outputs[0]
if self.output_attentions:
__A = all_attentions + (layer_outputs[1],)
__A = (hidden_states,)
if self.output_hidden_states:
__A = current_outputs + (all_hidden_states,)
if self.output_attentions:
__A = current_outputs + (all_attentions,)
__A = self.highway[i](_A )
# logits, pooled_output
if not self.training:
__A = highway_exit[0]
__A = entropy(_A )
__A = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__A = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__A = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_A , i + 1 )
else:
__A = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = (hidden_states,)
if self.output_hidden_states:
__A = outputs + (all_hidden_states,)
if self.output_attentions:
__A = outputs + (all_attentions,)
__A = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' , SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Tuple , _A :List[str] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config
__A = BertEmbeddings(_A )
__A = DeeBertEncoder(_A )
__A = BertPooler(_A )
self.init_weights()
def lowercase_ ( self :Union[str, Any] ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def lowercase_ ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def lowercase_ ( self :Tuple , _A :Tuple ) -> Union[str, Any]:
'''simple docstring'''
__A = value
def lowercase_ ( self :int , _A :int ) -> Tuple:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_A )
@add_start_docstrings_to_model_forward(_A )
def lowercase_ ( self :Tuple , _A :int=None , _A :List[Any]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Any=None , _A :List[str]=None , _A :Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__A = input_ids.size()
elif inputs_embeds is not None:
__A = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__A = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__A = torch.ones(_A , device=_A )
if encoder_attention_mask is None:
__A = torch.ones(_A , device=_A )
if token_type_ids is None:
__A = torch.zeros(_A , dtype=torch.long , device=_A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__A = self.get_extended_attention_mask(_A , _A , _A )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__A = encoder_attention_mask[:, None, None, :]
__A = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__A = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__A = self.get_head_mask(_A , self.config.num_hidden_layers )
__A = self.embeddings(
input_ids=_A , position_ids=_A , token_type_ids=_A , inputs_embeds=_A )
__A = self.encoder(
_A , attention_mask=_A , head_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A = encoder_outputs[0]
__A = self.pooler(_A )
__A = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Optional[Any] , _A :str , _A :List[str] ) -> Optional[int]:
'''simple docstring'''
__A = message
__A = exit_layer # start from 1!
class UpperCamelCase__ ( nn.Module):
def __init__( self :Any , _A :Dict ) -> Tuple:
'''simple docstring'''
super().__init__()
__A = BertPooler(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , config.num_labels )
def lowercase_ ( self :List[Any] , _A :Optional[Any] ) -> int:
'''simple docstring'''
__A = encoder_outputs[0]
__A = self.pooler(_A )
# "return" pooler_output
# BertModel
__A = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__A = bmodel_output[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :str , _A :Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config.num_labels
__A = config.num_hidden_layers
__A = DeeBertModel(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_A )
def lowercase_ ( self :Tuple , _A :str=None , _A :Optional[int]=None , _A :Any=None , _A :str=None , _A :int=None , _A :Tuple=None , _A :Any=None , _A :List[str]=-1 , _A :Optional[Any]=False , ) -> List[str]:
'''simple docstring'''
__A = self.num_layers
try:
__A = self.bert(
_A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__A = outputs[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
__A = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__A = e.message
__A = e.exit_layer
__A = outputs[0]
if not self.training:
__A = entropy(_A )
__A = []
__A = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__A = []
for highway_exit in outputs[-1]:
__A = highway_exit[0]
if not self.training:
highway_logits_all.append(_A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_A )
if train_highway:
__A = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__A = (loss,) + outputs
if not self.training:
__A = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__A = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
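# Editorial note (not in the original file): this is DeeBERT's early-exit mechanism. At
# inference each per-layer "highway" head classifies the intermediate hidden state; once its
# prediction entropy drops below early_exit_entropy[i], HighwayException aborts the remaining
# layers carrying the early logits and the exit layer index, which the except-branch above unpacks.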
| 161 | 0 |
"""simple docstring"""
_snake_case : Tuple = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case : Optional[int] = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case : Tuple = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 365 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_snake_case : List[str] = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source , target ):
    return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def lowerCAmelCase_ ( __lowerCamelCase ):
    args = _TestCommandArgs(dataset=__lowerCamelCase , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    datasets_readme_path = os.path.join(__lowerCamelCase , "README.md" )
    assert os.path.exists(datasets_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(__lowerCamelCase )
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_3_5_1_5_6_3,
"num_examples": 1_0_0_0_0,
},
{
"name": "validation",
"num_bytes": 2_3_8_4_1_8,
"num_examples": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos["default"] , key ), getattr(expected_dataset_infos["default"] , key )
if key == "num_bytes":
            assert is_apercent_close(result , expected )
elif key == "splits":
            assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 134 | 0 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
    training_set = train_datagen.flow_from_directory(
        """dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
    )
    test_set = test_datagen.flow_from_directory(
        """dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        """dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = """Normal"""
    if result[0][0] == 1:
        prediction = """Abnormality detected"""
| 174 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : torch.FloatTensor
class a__ ( __A , __A ):
"""simple docstring"""
@register_to_config
def __init__(self , __lowercase = 3 , __lowercase = 3 , __lowercase = ("DownEncoderBlock2D",) , __lowercase = ("UpDecoderBlock2D",) , __lowercase = (64,) , __lowercase = 1 , __lowercase = "silu" , __lowercase = 3 , __lowercase = 32 , __lowercase = 2_56 , __lowercase = 32 , __lowercase = None , __lowercase = 0.1_8_2_1_5 , __lowercase = "group" , ):
super().__init__()
# pass init params to Encoder
__lowerCAmelCase = Encoder(
in_channels=__lowercase , out_channels=__lowercase , down_block_types=__lowercase , block_out_channels=__lowercase , layers_per_block=__lowercase , act_fn=__lowercase , norm_num_groups=__lowercase , double_z=__lowercase , )
__lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
__lowerCAmelCase = nn.Convad(__lowercase , __lowercase , 1 )
__lowerCAmelCase = VectorQuantizer(__lowercase , __lowercase , beta=0.2_5 , remap=__lowercase , sane_index_shape=__lowercase )
__lowerCAmelCase = nn.Convad(__lowercase , __lowercase , 1 )
# pass init params to Decoder
__lowerCAmelCase = Decoder(
in_channels=__lowercase , out_channels=__lowercase , up_block_types=__lowercase , block_out_channels=__lowercase , layers_per_block=__lowercase , act_fn=__lowercase , norm_num_groups=__lowercase , norm_type=__lowercase , )
@apply_forward_hook
def _snake_case (self , __lowercase , __lowercase = True ):
__lowerCAmelCase = self.encoder(__lowercase )
__lowerCAmelCase = self.quant_conv(__lowercase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__lowercase )
@apply_forward_hook
def _snake_case (self , __lowercase , __lowercase = False , __lowercase = True ):
# also go through quantization layer
if not force_not_quantize:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.quantize(__lowercase )
else:
__lowerCAmelCase = h
__lowerCAmelCase = self.post_quant_conv(__lowercase )
__lowerCAmelCase = self.decoder(__lowercase , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowercase )
def _snake_case (self , __lowercase , __lowercase = True ):
__lowerCAmelCase = sample
__lowerCAmelCase = self.encode(__lowercase ).latents
__lowerCAmelCase = self.decode(__lowercase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowercase )
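# Editorial note (not in the original file): this is a VQ-VAE in the diffusers style.
# encode() runs the Encoder and quant_conv; decode() applies the VectorQuantizer (unless
# force_not_quantize), then post_quant_conv and the Decoder; the forward pass at the end
# simply chains encode -> decode to reconstruct the input sample.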
| 174 | 1 |
import functools
def __lowerCamelCase (days : list[int] , costs : list[int] ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 3_6_6:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )
@functools.cache
def dynamic_programming(UpperCAmelCase__ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
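# Illustrative usage (editorial; the classic minimum-cost-tickets instances, not asserted
# anywhere in this file). With 1-, 7- and 30-day passes costing 2, 7 and 15:
#
# __lowerCamelCase([1, 4, 6, 7, 8, 20], [2, 7, 15])                      # -> 11
# __lowerCamelCase([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15])  # -> 17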
| 365 | from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : int = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
_lowerCamelCase : Tuple = {'''allegro/herbert-base-cased''': 5_14}
_lowerCamelCase : Optional[int] = {}
class lowercase ( a ):
lowercase__ : List[str] = VOCAB_FILES_NAMES
lowercase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Tuple = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : str = HerbertTokenizer
def __init__( self : Dict , _UpperCamelCase : Any=None , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Optional[int]="<s>" , _UpperCamelCase : Union[str, Any]="<unk>" , _UpperCamelCase : List[str]="<pad>" , _UpperCamelCase : List[str]="<mask>" , _UpperCamelCase : Tuple="</s>" , **_UpperCamelCase : Any , ) -> str:
'''simple docstring'''
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , cls_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sep_token=_UpperCamelCase , **_UpperCamelCase , )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __snake_case( self : Any , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : Union[str, Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
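# Editorial note (not in the original file): HerBERT keeps BERT's single-segment/pair layout
# here. For hypothetical ids A = [10, 11] and B = [20]:
#   build_inputs_with_special_tokens(A)    -> [<s>, 10, 11, </s>]
#   build_inputs_with_special_tokens(A, B) -> [<s>, 10, 11, </s>, 20, </s>]
# with token type ids 0 over the first segment (specials included) and 1 over the second.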
| 206 | 0 |
'''simple docstring'''
import math
def proth ( number ):
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
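# Editorial note (not in the original file): Proth numbers are N = k * 2**n + 1 with k odd
# and k < 2**n; the list built above therefore starts 3, 5, 9, 13, 17, 25, 33, ...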
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
        value = 0
try:
            value = proth(number)
except ValueError:
print(F"ValueError: there is no {number}th Proth number")
continue
print(F"The {number}th Proth number: {value}") | 206 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Any = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Any:
snake_case : Optional[int] = WavaVecaForSequenceClassification.from_pretrained(lowercase ,config=lowercase )
snake_case : List[str] = downstream_dict["""projector.weight"""]
snake_case : Dict = downstream_dict["""projector.bias"""]
snake_case : Dict = downstream_dict["""model.post_net.linear.weight"""]
snake_case : List[Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[str]:
snake_case : str = WavaVecaForAudioFrameClassification.from_pretrained(lowercase ,config=lowercase )
snake_case : List[Any] = downstream_dict["""model.linear.weight"""]
snake_case : str = downstream_dict["""model.linear.bias"""]
return model
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> str:
snake_case : Any = WavaVecaForXVector.from_pretrained(lowercase ,config=lowercase )
snake_case : str = downstream_dict["""connector.weight"""]
snake_case : Optional[Any] = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case : List[Any] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
snake_case : Optional[int] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
snake_case : List[str] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case : Union[str, Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case : Any = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
snake_case : Tuple = torch.load(lowercase ,map_location="""cpu""" )
snake_case : Any = checkpoint["""Downstream"""]
snake_case : List[str] = WavaVecaConfig.from_pretrained(lowercase )
snake_case : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
lowercase ,return_attention_mask=lowercase ,do_normalize=lowercase )
snake_case : str = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case : int = convert_classification(lowercase ,lowercase ,lowercase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case : Dict = convert_diarization(lowercase ,lowercase ,lowercase )
elif arch.endswith("""ForXVector""" ):
snake_case : Optional[Any] = convert_xvector(lowercase ,lowercase ,lowercase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
snake_case : List[str] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowerCamelCase : int = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
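# Illustrative invocation (editorial; hypothetical file names, flags as defined above):
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./classifier_config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model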
| 124 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length ( fnc , x_start , x_end , steps = 100 , ):
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
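# Editorial note (not in the original file): each iteration adds the straight segment
#   sqrt((x2 - x1)**2 + (f(x2) - f(x1))**2),
# i.e. the loop is the standard polyline approximation of the arc length integral
# of sqrt(1 + f'(x)**2) dx, converging as `steps` grows.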
if __name__ == "__main__":
    def f ( x ):
        return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
lowercase__ : str = 10
while i <= 10_00_00:
print(f'With {i} steps: {line_length(f, -10, 10, i)}')
i *= 10
| 361 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Optional[Any] = """table-transformer"""
_lowerCAmelCase : Any = ["""past_key_values"""]
_lowerCAmelCase : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Any , lowercase_ : Any=True , lowercase_ : Dict=None , lowercase_ : Optional[int]=3 , lowercase_ : Any=100 , lowercase_ : List[str]=6 , lowercase_ : Any=2048 , lowercase_ : Any=8 , lowercase_ : Tuple=6 , lowercase_ : List[Any]=2048 , lowercase_ : List[str]=8 , lowercase_ : List[Any]=0.0 , lowercase_ : str=0.0 , lowercase_ : Dict=True , lowercase_ : Optional[int]="relu" , lowercase_ : Dict=256 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : Dict=0.02 , lowercase_ : int=1.0 , lowercase_ : Tuple=False , lowercase_ : Optional[Any]="sine" , lowercase_ : Union[str, Any]="resnet50" , lowercase_ : List[Any]=True , lowercase_ : List[Any]=False , lowercase_ : Optional[Any]=1 , lowercase_ : Dict=5 , lowercase_ : List[Any]=2 , lowercase_ : Tuple=1 , lowercase_ : List[Any]=1 , lowercase_ : Dict=5 , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=0.1 , **lowercase_ : int , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
snake_case_ : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(lowercase_ , lowercase_ ):
snake_case_ : List[Any] = backbone_config.get('''model_type''' )
snake_case_ : int = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[str] = config_class.from_dict(lowercase_ )
# set timm attributes to None
snake_case_, snake_case_, snake_case_ : List[str] = None, None, None
snake_case_ : Tuple = use_timm_backbone
snake_case_ : int = backbone_config
snake_case_ : str = num_channels
snake_case_ : List[str] = num_queries
snake_case_ : int = d_model
snake_case_ : List[str] = encoder_ffn_dim
snake_case_ : Any = encoder_layers
snake_case_ : List[Any] = encoder_attention_heads
snake_case_ : Optional[int] = decoder_ffn_dim
snake_case_ : Tuple = decoder_layers
snake_case_ : List[str] = decoder_attention_heads
snake_case_ : Tuple = dropout
snake_case_ : Union[str, Any] = attention_dropout
snake_case_ : Dict = activation_dropout
snake_case_ : Optional[Any] = activation_function
snake_case_ : Optional[Any] = init_std
snake_case_ : str = init_xavier_std
snake_case_ : Any = encoder_layerdrop
snake_case_ : Optional[Any] = decoder_layerdrop
snake_case_ : List[str] = encoder_layers
snake_case_ : Optional[int] = auxiliary_loss
snake_case_ : List[Any] = position_embedding_type
snake_case_ : List[Any] = backbone
snake_case_ : Union[str, Any] = use_pretrained_backbone
snake_case_ : Optional[Any] = dilation
# Hungarian matcher
snake_case_ : Tuple = class_cost
snake_case_ : Any = bbox_cost
snake_case_ : Dict = giou_cost
# Loss coefficients
snake_case_ : Optional[Any] = mask_loss_coefficient
snake_case_ : str = dice_loss_coefficient
snake_case_ : List[str] = bbox_loss_coefficient
snake_case_ : int = giou_loss_coefficient
snake_case_ : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self : Optional[int] ):
return self.encoder_attention_heads
@property
def _snake_case ( self : Any ):
return self.d_model
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : List[Any] = version.parse("""1.11""")
@property
def _snake_case ( self : List[Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _snake_case ( self : int ):
return 1E-5
@property
def _snake_case ( self : Optional[int] ):
return 12
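# Editorial note (not in the original file): the ONNX config above declares dynamic
# batch/channel/height/width axes for `pixel_values` and a dynamic batch axis for
# `pixel_mask`, validates exported outputs with atol 1e-5, and defaults to ONNX opset 12
# (with torch 1.11 as the declared minimum version).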
| 155 | 0 |