from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least value of n for which the fill-count function first
    exceeds one million, where a row of length n is filled with blocks of
    length at least `min_block_length`, any two blocks being separated by at
    least one empty cell (Project Euler problem 115).
    """
    # fill_count_functions[n] counts the ways to fill a row of length n
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Calculate the net present value of a series of cash flows at a given
    discount rate, rounded to two decimal places.

    >>> present_value(0.0, [10.0, 20.0, 30.0])
    60.0
    >>> present_value(0.1, [0.0, 100.0])
    90.91
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
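
# The sum above is the textbook present-value formula
#     PV = sum_i cash_flow_i / (1 + discount_rate)**i
# with i = 0 for the cash flow received today (hence no discounting of the
# first entry).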
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
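
# A minimal usage sketch, assuming the `transformers` + `tokenizers` stack is
# installed (checkpoint name taken from PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#
# Because of set_src_lang_special_tokens, the encoded sequence is suffixed
# with `[eos, language_code]` rather than wrapped in bos/eos as in BERT-style
# tokenizers.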
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
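
# These tests are normally collected by the transformers test suite, e.g.:
#
#     python -m pytest tests/models/wav2vec2/test_feature_extraction_wav2vec2.py
#
# (the path is the usual location in the transformers repo and may differ
# across versions). SequenceFeatureExtractionTestMixin contributes the shared
# padding/batching tests on top of the ones defined here.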
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `b` can be obtained from string `a` by
    capitalizing some of its lowercase letters and deleting all the
    remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i chars of `a` produce the first j chars of `b`?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation as a string.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(5)
    '0b101'
    >>> decimal_to_binary(-5)
    '-0b101'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check whether a queen can be placed at board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking on conflicts."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
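
# Note: with n = 8 the backtracking search above prints each completed board
# and finally reports the solution count; the classical answer for the
# 8-queens puzzle is 92 solutions.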
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then
    # check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
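
# A minimal end-to-end sketch of how these helpers are normally reached,
# assuming `transformers`, `accelerate` and `bitsandbytes` are installed and a
# CUDA device is present (the model name is illustrative):
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "facebook/opt-350m",
#         quantization_config=BitsAndBytesConfig(load_in_8bit=True),
#     )
#
# `from_pretrained` calls `replace_with_bnb_linear` to swap nn.Linear/Conv1D
# modules for their bitsandbytes counterparts, and
# `set_module_quantized_tensor_to_device` when materializing the weights.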
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
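
# Run with pytest from the datasets repo root, e.g.:
#
#     python -m pytest tests/packaged_modules/test_csv.py
#
# (the exact path follows the repo layout implied by the relative imports
# above). The `image_file` argument used by `csv_file_with_image` is expected
# to be provided as a fixture by the suite's shared conftest.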
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True
    (in which case `value` is assumed to already be a sigmoid output)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so that the network output approaches `expected`."""
    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
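
# Illustrative behaviour (stochastic, so stated as an expectation rather than
# a doctest): for a target in the open interval (0, 100), e.g.
# forward_propagation(32, 450_000), the returned value typically lands within
# about one unit of the target once the number of propagations is large.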
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
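
# This mirrors Project Euler problem 13: `num.txt` is expected to sit next to
# this script and contain the one hundred 50-digit numbers from the problem
# statement, one per line.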
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
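
# A minimal configuration round-trip, assuming a `transformers` release with
# ConvNeXt V2 support is installed:
#
#     config = ConvNextV2Config()              # defaults: 4 stages, depths [3, 3, 9, 3]
#     config.save_pretrained("./convnextv2")   # writes config.json
#     config = ConvNextV2Config.from_pretrained("./convnextv2")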
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
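
# A minimal usage sketch, assuming `transformers` and `sentencepiece` are
# installed (checkpoint name from PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tokenizer("Hello world").input_ids
#     text = tokenizer.decode(ids)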
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
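# Each optional-dependency block below registers extra submodules in _import_structure;
# _LazyModule (at the bottom of this file) then defers the heavy imports until an
# attribute is first accessed.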
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta'''] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta_fast'''] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta'''] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm_roberta'''] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xlm_roberta'''] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Dict = RobertaTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ) -> Optional[int]:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer, tokenizer_component, new_value )
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self, value ) -> List[Any]:
        # Mask token behaves like a normal word: include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False ) if isinstance(value, str ) else value
        self._mask_token = value
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
    def snake_case_ ( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def snake_case_ ( self, token_ids_0, token_ids_1=None ) -> Tuple:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def snake_case_ ( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 40 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_autoformer'''] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    def __init__( self, features=None, **torch_tensor_kwargs ) -> Tuple:
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate( self, column ) -> Dict:
        import torch
        # Stack a list of equally-shaped, same-dtype tensors into one tensor; otherwise return as-is.
        if isinstance(column, list ) and column:
            if all(
                isinstance(x, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize( self, value ) -> Any:
        import torch
        if isinstance(value, (str, bytes, type(None )) ):
            return value
        elif isinstance(value, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value, PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self, data_struct ) -> List[Any]:
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct, '__array__' ) and not isinstance(data_struct, torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct, (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self, data_struct ) -> int:
        return map_nested(self._recursive_tensorize, data_struct, map_list=False )
    def format_row( self, pa_table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self, pa_table ) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self, pa_table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
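# Minimal usage sketch of this formatter through the public datasets API (the column
# values are illustrative):
#   from datasets import Dataset
#   ds = Dataset.from_dict({'x': [[1, 2], [3, 4]]}).with_format('torch')
#   ds[0]['x']   # -> tensor([1, 2]), produced by format_row / recursive_tensorize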
| 40 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray , input_b: np.ndarray ) -> float:
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
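# e.g. euclidean(np.array([0, 0]), np.array([3, 4])) == 5.0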
def similarity_search(dataset: np.ndarray , value_array: np.ndarray ) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            # keep the smallest distance seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
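# e.g. similarity_search(np.array([[0, 0], [1, 1], [2, 2]]), np.array([[0, 1]]))
# returns [[[0, 0], 1.0]]: [0, 0] is the nearest vector, at euclidean distance 1.0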
def cosine_similarity(input_a: np.ndarray , input_b: np.ndarray ) -> float:
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
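# e.g. cosine_similarity(np.array([1, 0]), np.array([1, 0])) == 1.0
# and  cosine_similarity(np.array([1, 0]), np.array([0, 1])) == 0.0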
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = torch.device('''cpu''')
def prepare_img() -> Tuple:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output(swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key(dct , old , new ) -> str:
    # pop the value under the old key and re-insert it under the new one
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict ) -> Optional[Any]:
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls = k_new.split('.' )
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new = k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys
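# e.g. 'network.0.0.dwconv' -> 'swiftformer.encoder.network.0.blocks.0.depth_wise_conv'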
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name , pytorch_dump_folder_path , original_ckpt ) -> List[Any]:
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location='cpu' , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location='cpu' )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config' )
    inputs = processor(images=image , return_tensors='pt' )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['pixel_values'] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
__UpperCAmelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 40 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
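    # A single sigmoid output unit paired with binary cross-entropy is the standard
    # head for two-class image classification; the output is P(class == 1).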
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
    test_set = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
# training_set.class_indices
    # The sigmoid output is a probability in (0, 1); it never equals exactly 0 or 1,
    # so threshold it instead of comparing for equality.
    if result[0][0] < 0.5:
        prediction = '''Normal'''
    else:
        prediction = '''Abnormality detected'''
| 40 | 1 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size , sigma ):
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
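# Note: this kernel is not normalized to sum to 1 (the 1 / (2 * pi * sigma) factor is
# only the continuous-density prefactor), so the filtered image's overall brightness
# is scaled by the kernel's sum rather than preserved exactly.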
def gaussian_filter(image , k_size , sigma ):
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
waitKey()
| 40 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = '''us-east-1''' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ) -> str:
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request ) -> Union[str, Any]:
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 40 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : int = CpmAntTokenizer
UpperCAmelCase__ : List[str] = False
def snake_case_ ( self ) -> List[str]:
super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def snake_case_ ( self ) -> Tuple:
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens, jieba_tokens )
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text, normalized_text )
| 40 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file(filename , start_prompt , end_prompt ) -> Optional[int]:
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide ) -> Optional[Any]:
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task(task_guide , overwrite=False ) -> Tuple:
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                ' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 1 |
import requests
giphy_api_key = '''YOUR API KEY'''
def get_gifs(query: str , api_key: str = giphy_api_key ) -> list:
    formatted_query = '+'.join(query.split() )
    url = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url ).json()['data']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 40 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : int = IFPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ) -> str:
return self._get_dummy_components()
    def snake_case_ ( self, device, seed=0 ) -> Union[str, Any]:
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def snake_case_ ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_local()
def snake_case_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def snake_case_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def snake_case_ ( self ) -> List[Any]:
        # if
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16 )
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=None, tokenizer=None )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda' )
        prompt_embeds , negative_prompt_embeds = pipe_1.encode_prompt('anime turtle', device='cuda' )
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components )
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components )
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds )
    def _test_if( self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds ) -> Any:
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
        assert_mean_pixel_difference(image, expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image, expected_image )
    def _test_if_img2img( self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds ) -> List[Any]:
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(torch_device )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
        assert_mean_pixel_difference(image, expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(torch_device )
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image, expected_image )
    def _test_if_inpainting( self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds ) -> Optional[Any]:
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(torch_device )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
        assert_mean_pixel_difference(image, expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(torch_device )
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image, expected_image )
def _start_torch_memory_measurement() -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 40 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def b2mb(x ) -> Any:
    return int(x / 2**20 )
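# e.g. b2mb(3 * 2**20) == 3  (bytes -> whole megabytes)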
class TorchTracemalloc:
    def __enter__( self ) -> Any:
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self, *exc ) -> List[Any]:
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin )
        self.peaked = b2mb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased" , n_train: int = 320 , n_val: int = 160 , ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(b2mb(tracemalloc.begin ) ) )
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + b2mb(tracemalloc.begin ) ) )
        train_total_peak_memory[F"""epoch-{epoch}"""] = tracemalloc.peaked + b2mb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
            json.dump(train_total_peak_memory , f )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
    parser.add_argument(
        '--n_train' , type=int , default=320 , help='Number of training examples to use.' , )
    parser.add_argument(
        '--n_val' , type=int , default=160 , help='Number of validation examples to use.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 40 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase ( snake_case__ : Tuple="" ) -> str:
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
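# Round-trip checks for AgentAudio: a tensor serialized via to_string() should
# decode from disk back to (approximately) the same samples.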
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) )
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' )
sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 )
UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) )
UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = 'Hey!'
UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('''T''')
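# A node in a disjoint-set forest: stores its data, a parent pointer
# (initially itself), and a rank used for union by rank.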
class lowerCAmelCase_ ( Generic[T] ):
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : Tuple = data
UpperCamelCase : List[Any] = self
UpperCamelCase : int = 0
class lowerCAmelCase_ ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
UpperCamelCase : dict[T, DisjointSetTreeNode[T]] = {}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
# create a new set with x as its member
UpperCamelCase : List[Any] = DisjointSetTreeNode(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
UpperCamelCase : Optional[int] = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase : Tuple = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase : Dict = nodea
else:
UpperCamelCase : Dict = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(SCREAMING_SNAKE_CASE_ ), self.find_set(SCREAMING_SNAKE_CASE_ ) )
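# Undirected weighted graph with a Kruskal-style minimum spanning tree builder
# backed by the disjoint-set forest above.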
class lowerCAmelCase_ ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase : dict[T, dict[T, int]] = {}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
# add a node ONLY if it's not present in the graph
if node not in self.connections:
UpperCamelCase : Any = {}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
# add an edge with the given weight
self.add_node(SCREAMING_SNAKE_CASE_ )
self.add_node(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = weight
UpperCamelCase : List[str] = weight
def snake_case_ ( self ) -> GraphUndirectedWeighted[T]:
UpperCamelCase : List[str] = []
UpperCamelCase : Optional[Any] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda SCREAMING_SNAKE_CASE_ : SCREAMING_SNAKE_CASE_[2] )
# creating the disjoint set
UpperCamelCase : int = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(SCREAMING_SNAKE_CASE_ )
# MST generation
UpperCamelCase : Dict = 0
UpperCamelCase : Tuple = 0
UpperCamelCase : List[Any] = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = edges[index]
index += 1
UpperCamelCase : Dict = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
disjoint_set.union(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return graph
| 40 |
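# Return the k-th (0-indexed) lexicographic permutation of range(n) using the
# factorial number system: each divmod by a factorial selects one of the
# remaining elements. Worked example, assuming the renamed arguments stand for
# (k=10, n=4): factorials=[1, 2, 6]; divmod(10, 6)=(1, 4) picks 1;
# divmod(4, 2)=(2, 0) picks 3; divmod(0, 1)=(0, 0) picks 0; the last remaining
# element is 2, giving [1, 3, 0, 2].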
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : int = [1]
for i in range(2 , snake_case__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCamelCase : List[Any] = []
UpperCamelCase : List[Any] = list(range(snake_case__ ) )
# Find permutation
while factorials:
UpperCamelCase : int = factorials.pop()
UpperCamelCase , UpperCamelCase : int = divmod(snake_case__ , snake_case__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
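# Kahn's algorithm for topological sorting: repeatedly dequeue zero-indegree
# vertices; if fewer than len(graph) vertices get processed, a cycle exists.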
def UpperCamelCase ( snake_case__ : List[str] ) -> Optional[int]:
UpperCamelCase : int = [0] * len(snake_case__ )
UpperCamelCase : Optional[Any] = []
UpperCamelCase : Optional[Any] = []
UpperCamelCase : Optional[Any] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Tuple = queue.pop(0 )
cnt += 1
topo.append(snake_case__ )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(snake_case__ )
if cnt != len(snake_case__ ):
print('Cycle exists' )
else:
print(snake_case__ )
# Adjacency List of Graph
__UpperCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 40 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( a__ ):
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any:
UpperCamelCase : int = parent
UpperCamelCase : int = batch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : List[str] = patch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 )
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : Optional[int] = conv_kernel_size
UpperCamelCase : List[str] = output_stride
UpperCamelCase : Union[str, Any] = classifier_dropout_prob
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Any = is_training
UpperCamelCase : int = num_labels
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Tuple = scope
UpperCamelCase : List[str] = width_multiplier
UpperCamelCase : Any = ffn_dropout
UpperCamelCase : List[Any] = attn_dropout
def snake_case_ ( self ) -> int:
UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[str] = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MobileViTVaModelTester(self )
UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase : Any = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
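# Shared fixture for the integration tests below: the standard COCO test image.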
def UpperCamelCase ( ) -> Tuple:
UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = outputs.logits.detach().cpu()
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCamelCase : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
| 40 | 1 |
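# Arc length of a circular sector: L = 2 * pi * r * (angle / 360), with the
# angle in degrees. For radius 10 and a 90-degree angle this gives
# 2 * pi * 10 * 0.25 = 5 * pi, roughly 15.7079.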
from math import pi
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 40 |
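# Longest path (by vertex count) in a DAG: visit vertices in topological order
# (Kahn's algorithm) and relax each edge so long_dist[x] becomes the maximum of
# long_dist[vertex] + 1 over all predecessors. For the adjacency list below the
# longest chain is e.g. 0 -> 3 -> 5 -> 6 -> 7, so the script prints 5.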
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str:
UpperCamelCase : List[str] = [0] * len(snake_case__ )
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 40 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
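# hf_hub_url should build the Hub's /resolve URL, defaulting the revision to
# "main" and percent-encoding any spaces in the file path via quote().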
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ) -> int:
UpperCamelCase : Optional[int] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(snake_case__ )}"""
| 40 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(
description=(
'''Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learning'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
__UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
__UpperCAmelCase = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
__UpperCAmelCase = model.state_dict()
__UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
__UpperCAmelCase = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
__UpperCAmelCase = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
__UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__UpperCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
__UpperCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
__UpperCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
__UpperCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
__UpperCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
__UpperCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
__UpperCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
__UpperCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
__UpperCAmelCase = state_dict['''cls.predictions.decoder.weight''']
__UpperCAmelCase = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__UpperCAmelCase = state_dict[F"""cls.predictions.transform.dense.{w}"""]
__UpperCAmelCase = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 40 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
import os
from collections.abc import Iterator
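# Yield the repository's .py/.ipynb files, skipping the 'scripts' directory,
# hidden/private directories, and __init__.py files.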
def UpperCamelCase ( snake_case__ : str = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(snake_case__ ):
UpperCamelCase : Optional[Any] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(snake_case__ )[1] in (".py", ".ipynb"):
yield os.path.join(snake_case__ , snake_case__ ).lstrip('./' )
def UpperCamelCase ( snake_case__ : List[Any] ) -> List[str]:
return F"""{i * " "}*""" if i else "\n##"
def UpperCamelCase ( snake_case__ : str , snake_case__ : str ) -> str:
UpperCamelCase : str = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(snake_case__ ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(snake_case__ )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def UpperCamelCase ( snake_case__ : str = "." ) -> None:
UpperCamelCase : Any = ''
for filepath in sorted(good_file_paths(snake_case__ ) ):
UpperCamelCase , UpperCamelCase : str = os.path.split(snake_case__ )
if filepath != old_path:
UpperCamelCase : str = print_path(snake_case__ , snake_case__ )
UpperCamelCase : int = (filepath.count(os.sep ) + 1) if filepath else 0
UpperCamelCase : str = F"""{filepath}/{filename}""".replace(' ' , '%20' )
UpperCamelCase : Tuple = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F"""{md_prefix(snake_case__ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('''.''')
| 40 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
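# Build a nested list of random floats with the given 2-D shape, scaled by
# `scale`; falls back to the module-level RNG when none is supplied.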
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Dict = ["image_processor", "tokenizer"]
UpperCAmelCase__ : Optional[Any] = "BlipImageProcessor"
UpperCAmelCase__ : Optional[Any] = "AutoTokenizer"
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.image_processor
def __call__( self, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 0, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
UpperCamelCase : Union[str, Any] = self.tokenizer
UpperCamelCase : Optional[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, stride=SCREAMING_SNAKE_CASE_, pad_to_multiple_of=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, return_overflowing_tokens=SCREAMING_SNAKE_CASE_, return_special_tokens_mask=SCREAMING_SNAKE_CASE_, return_offsets_mapping=SCREAMING_SNAKE_CASE_, return_token_type_ids=SCREAMING_SNAKE_CASE_, return_length=SCREAMING_SNAKE_CASE_, verbose=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
return text_encoding
# add pixel_values
UpperCamelCase : Dict = self.image_processor(SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_ )
if text is not None:
UpperCamelCase : Any = self.tokenizer(
text=SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, stride=SCREAMING_SNAKE_CASE_, pad_to_multiple_of=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, return_overflowing_tokens=SCREAMING_SNAKE_CASE_, return_special_tokens_mask=SCREAMING_SNAKE_CASE_, return_offsets_mapping=SCREAMING_SNAKE_CASE_, return_token_type_ids=SCREAMING_SNAKE_CASE_, return_length=SCREAMING_SNAKE_CASE_, verbose=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
else:
UpperCamelCase : Dict = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE_ )
return encoding_image_processor
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Any:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[Any] = self.tokenizer.model_input_names
UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 40 |
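# Convert an integer to a Python-style binary string by repeatedly taking
# num % 2 (least significant bit first): 10 -> '0b1010', -5 -> '-0b101',
# and 0 maps to '0b0' as a special case.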
def UpperCamelCase ( snake_case__ : int ) -> str:
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
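# Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of the
# longest proper prefix of input_string[: i + 1] that is also its suffix.
# For example, 'aabcdaabc' yields [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest
# border found anywhere in the string has length 4.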
def UpperCamelCase ( snake_case__ : str ) -> list:
UpperCamelCase : Optional[int] = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
UpperCamelCase : str = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
UpperCamelCase : List[Any] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
UpperCamelCase : List[Any] = j
return prefix_result
def UpperCamelCase ( snake_case__ : str ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
if value.dtype == torch.inta:
UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
if is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
elif is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
UpperCamelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
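# Recursively swap nn.Linear (and Conv1D) modules for bitsandbytes 8-bit or
# 4-bit linear layers, leaving untouched any module whose name appears in
# modules_to_not_convert.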
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        UpperCamelCase : Any = bnb.nn.Linear8bitLt(
                            snake_case__ , snake_case__ , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            UpperCamelCase : str = bnb.nn.Linear4bit(
                                snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
    UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
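# Illustration of the helper above on a hypothetical causal-LM whose `lm_head.weight`
# is tied to the input embeddings: the tied names plus the last child module are kept,
# the '.weight'/'.bias' suffixes are stripped, and something like ['lm_head'] is returned --
# matching the default `modules_to_not_convert` used by `replace_with_bnb_linear` above.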
| 40 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase ( ) -> Any:
UpperCamelCase : Optional[int] = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=snake_case__ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=snake_case__ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=snake_case__ )
return parser.parse_args()
def UpperCamelCase ( ) -> Optional[int]:
UpperCamelCase : Optional[Any] = parse_args()
# Import training_script as a module.
UpperCamelCase : Any = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCamelCase : Dict = script_fpath.stem
UpperCamelCase : Optional[int] = importlib.import_module(snake_case__ )
# Patch sys.argv
UpperCamelCase : Dict = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
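# Example invocation (script name and arguments are hypothetical):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# Everything after the training script is forwarded untouched, with
# '--tpu_num_cores <n>' appended so the spawned processes see the core count.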
| 40 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
    UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
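# For reference, the converters test above relies on per-column parsing: a cell such as
# "1 2 3" is mapped through `lambda x: [int(i) for i in x.split()]` to the Python list
# [1, 2, 3] before Arrow infers the list<int> column type.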
| 40 | 1 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : int = PriorTransformer
UpperCAmelCase__ : Optional[Any] = "hidden_states"
@property
def snake_case_ ( self ) -> Any:
UpperCamelCase : List[str] = 4
UpperCamelCase : Dict = 8
UpperCamelCase : int = 7
UpperCamelCase : Dict = floats_tensor((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = floats_tensor((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=0 ) -> Optional[Any]:
torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = 4
UpperCamelCase : int = 8
UpperCamelCase : Tuple = 7
UpperCamelCase : Tuple = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case_ ( self ) -> Optional[Any]:
return (4, 8)
@property
def snake_case_ ( self ) -> str:
return (4, 8)
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : List[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
UpperCamelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def snake_case_ ( self ) -> str:
UpperCamelCase , UpperCamelCase : Dict = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy', output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ), 0 )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case_ ( self ) -> int:
UpperCamelCase , UpperCamelCase : Optional[int] = self.prepare_init_args_and_inputs_for_common()
UpperCamelCase : Union[str, Any] = self.model_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Tuple = [*signature.parameters.keys()]
UpperCamelCase : List[Any] = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[Any] = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
if hasattr(SCREAMING_SNAKE_CASE_, 'set_default_attn_processor' ):
model.set_default_attn_processor()
UpperCamelCase : int = self.get_dummy_seed_input()
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase : Dict = output[0, :5].flatten().cpu()
print(SCREAMING_SNAKE_CASE_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
UpperCamelCase : Optional[int] = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, rtol=1e-2 ) )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=77, SCREAMING_SNAKE_CASE_=0 ) -> Any:
torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = batch_size
UpperCamelCase : Any = embedding_dim
UpperCamelCase : Any = num_embeddings
UpperCamelCase : Dict = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior', subfolder='prior' )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = self.get_dummy_seed_input(seed=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase : List[Any] = model(**SCREAMING_SNAKE_CASE_ )[0]
assert list(sample.shape ) == [1, 768]
UpperCamelCase : List[str] = sample[0, :8].flatten().cpu()
print(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 )
| 40 |
import math
import random
def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
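# Worked values for the derivative form used above (sigmoid'(x) = s * (1 - s),
# where s is the already-activated output):
#   s = sigmoid_function(0.0)       # 1 / (1 + e^0) = 0.5
#   ds = sigmoid_function(s, True)  # 0.5 * (1 - 0.5) = 0.25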
# Initial Value
__UpperCAmelCase = 0.02
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCamelCase : int = (expected / 100) - layer_a
# Error delta
        UpperCamelCase : List[str] = layer_1_error * sigmoid_function(layer_a , deriv=True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
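# One hedged worked step of the update rule above (expected = 50, current weight 1.0):
#   layer_1 = sigmoid(0.02 * 1.0) ~= 0.505
#   error   = 0.50 - 0.505 = -0.005
#   delta   = error * 0.505 * (1 - 0.505) ~= -0.00125
#   weight += 0.02 * delta
# Repeating this drives layer_1 * 100 toward the expected value.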
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input('''Expected value: '''))
__UpperCAmelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 1 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> str:
UpperCamelCase : Any = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : int = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase : str = 'fp16'
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase : List[str] = 'fp16'
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Dict:
# pass variant but use the non-variant filenames
UpperCamelCase : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
UpperCamelCase : List[Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : List[Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase : Optional[int] = 'fp16'
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Tuple = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
UpperCamelCase : Any = 'fp16'
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[int]:
# pass variant but use the non-variant filenames
UpperCamelCase : Union[str, Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
UpperCamelCase : str = 'fp16'
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[int] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase : Optional[int] = 'fp16'
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
| 40 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]:
return EnvironmentCommand()
class lowerCAmelCase_ ( a__ ):
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = huggingface_hub.__version__
UpperCamelCase : int = 'not installed'
UpperCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
UpperCamelCase : Any = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase : str = transformers.__version__
UpperCamelCase : Optional[Any] = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase : Dict = accelerate.__version__
UpperCamelCase : List[str] = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase : List[str] = xformers.__version__
UpperCamelCase : Dict = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE_ ) )
return info
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 40 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Any = "yolos"
def __init__( self, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-12, SCREAMING_SNAKE_CASE_=[512, 864], SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=100, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.1, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = hidden_size
UpperCamelCase : Union[str, Any] = num_hidden_layers
UpperCamelCase : Tuple = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : Any = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : Tuple = layer_norm_eps
UpperCamelCase : Tuple = image_size
UpperCamelCase : int = patch_size
UpperCamelCase : List[str] = num_channels
UpperCamelCase : List[str] = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
UpperCamelCase : Tuple = use_mid_position_embeddings
UpperCamelCase : Tuple = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Any = class_cost
UpperCamelCase : Optional[int] = bbox_cost
UpperCamelCase : str = giou_cost
# Loss coefficients
UpperCamelCase : List[str] = bbox_loss_coefficient
UpperCamelCase : Optional[int] = giou_loss_coefficient
UpperCamelCase : Optional[int] = eos_coefficient
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[Any] = version.parse("1.11" )
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case_ ( self ) -> float:
return 1e-4
@property
def snake_case_ ( self ) -> int:
return 12
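# Hedged export sketch via the generic transformers ONNX CLI (output path hypothetical):
#   python -m transformers.onnx --model=hustvl/yolos-small yolos-onnx/
# The config above supplies the input axes (`pixel_values`), the validation
# tolerance (1e-4) and the minimum torch version (1.11) for that export.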
| 40 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCAmelCase = {
'''facebook/xglm-564M''': 2_048,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase : Any = 7
UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : Optional[int] = len(self.sp_model )
UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
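        # Offset arithmetic illustrated with the alignment table above: the first "real"
        # sentencepiece token ',' has spm id 3, so with fairseq_offset = 1 it maps to
        # fairseq id 4, while <s>/<pad>/</s>/<unk> keep their hard-coded ids 0-3.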
def __getstate__( self ) -> List[Any]:
UpperCamelCase : int = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Any = {}
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase : Optional[int] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case_ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip()
return out_string
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 40 | 1 |
import os
import time
import numpy as np
import onnxruntime as ort
__UpperCAmelCase = '''1'''
__UpperCAmelCase = '''0'''
__UpperCAmelCase = '''1'''
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
__UpperCAmelCase = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
__UpperCAmelCase = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
__UpperCAmelCase = ort.RunOptions()
__UpperCAmelCase = 128
__UpperCAmelCase = 1
__UpperCAmelCase = np.ones((batch, sequence), dtype=np.int64)
__UpperCAmelCase = np.ones((batch, sequence), dtype=np.int64)
__UpperCAmelCase = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
__UpperCAmelCase = time.time()
__UpperCAmelCase = 2_000
__UpperCAmelCase = {}
for iter in range(max_iters):
__UpperCAmelCase = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1_000 / max_iters))
| 40 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Dict = RobertaTokenizer
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) )
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = add_prefix_space
UpperCamelCase : Optional[Any] = 'post_processor'
UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : Optional[Any] = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase : Optional[int] = tuple(state['cls'] )
UpperCamelCase : Any = False
if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Optional[int] = add_prefix_space
UpperCamelCase : List[Any] = True
if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCamelCase : Dict = trim_offsets
UpperCamelCase : Union[str, Any] = True
if changes_to_apply:
UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) )
UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
UpperCamelCase : List[Any] = value
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple:
UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
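    # Note on the method above: RoBERTa does not use token type ids, so the mask is all
    # zeros; e.g. hypothetical 3- and 2-token sequences yield
    # <s> a b c </s> </s> d e </s> -> nine positions, every one mapped to 0.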
| 40 | 1 |
def UpperCamelCase ( snake_case__ : list[list[int | float]] ) -> int:
UpperCamelCase : Tuple = len(snake_case__ )
UpperCamelCase : Tuple = len(matrix[0] )
UpperCamelCase : Tuple = min(snake_case__ , snake_case__ )
for row in range(snake_case__ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , snake_case__ ):
UpperCamelCase : Dict = matrix[col][row] / matrix[row][row]
for i in range(snake_case__ , snake_case__ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase : Optional[int] = True
for i in range(row + 1 , snake_case__ ):
if matrix[i][row] != 0:
UpperCamelCase , UpperCamelCase : Union[str, Any] = matrix[i], matrix[row]
UpperCamelCase : List[str] = False
break
if reduce:
rank -= 1
for i in range(snake_case__ ):
UpperCamelCase : Optional[Any] = matrix[i][rank]
            # Attempt to stay on the same row (note: reassigning a `for` loop variable has no effect on the next iteration in Python)
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
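# Worked example for the elimination routine above: for [[1.0, 2.0], [2.0, 4.0]] the
# first step subtracts 2 * row0 from row1, leaving [[1, 2], [0, 0]]; the zero pivot in
# the second column then decrements the rank, so the function returns 1.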
| 40 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
        if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(None )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
            UpperCamelCase : List[str] = {'dtype': torch.int64}
elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
            UpperCamelCase : int = {'dtype': torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
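        # Default dtypes applied above, shown on hypothetical inputs:
        #   np.array([1, 2])     -> torch.tensor(..., dtype=torch.int64)
        #   np.array([1.0, 2.0]) -> torch.tensor(..., dtype=torch.float32)
        # Strings/bytes pass through unchanged and PIL images go through np.asarray first.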
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
| 40 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__UpperCAmelCase = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class lowerCAmelCase_ ( a__ ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=1 ) -> List[str]:
UpperCamelCase : int = tokenizer
UpperCamelCase : List[str] = dataset
UpperCamelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) if n_tasks is None else n_tasks
UpperCamelCase : str = n_copies
def __iter__( self ) -> Tuple:
UpperCamelCase : List[str] = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
UpperCamelCase : Any = self.tokenizer(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase_ ( a__ ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : Any = start_length
UpperCamelCase : Optional[int] = eof_strings
UpperCamelCase : List[Any] = tokenizer
def __call__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCamelCase : List[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( snake_case__ : Any ) -> List[str]:
UpperCamelCase : Tuple = re.split('(%s)' % '|'.join(snake_case__ ) , snake_case__ )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[Any]=20 , **snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : Union[str, Any] = defaultdict(snake_case__ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(snake_case__ ) ):
with torch.no_grad():
UpperCamelCase : Optional[int] = batch['ids'].shape[-1]
UpperCamelCase : Union[str, Any] = accelerator.unwrap_model(snake_case__ ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=snake_case__ , **snake_case__ )
# each task is generated batch_size times
UpperCamelCase : Optional[Any] = batch['task_id'].repeat(snake_case__ )
UpperCamelCase : Any = accelerator.pad_across_processes(
snake_case__ , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCamelCase , UpperCamelCase : Any = accelerator.gather((generated_tokens, generated_tasks) )
UpperCamelCase : Optional[int] = generated_tokens.cpu().numpy()
UpperCamelCase : str = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(snake_case__ , snake_case__ ):
gen_token_dict[task].append(snake_case__ )
UpperCamelCase : Tuple = [[] for _ in range(snake_case__ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCamelCase : Optional[Any] = tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
code_gens[task].append(remove_last_block(snake_case__ ) )
return code_gens
def UpperCamelCase ( ) -> Any:
# Setup configuration
UpperCamelCase : Optional[Any] = HfArgumentParser(snake_case__ )
UpperCamelCase : Optional[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCamelCase : Dict = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCamelCase : Optional[int] = 'false'
if args.num_workers is None:
UpperCamelCase : Union[str, Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCamelCase : List[Any] = Accelerator()
set_seed(args.seed , device_specific=snake_case__ )
# Load model and tokenizer
UpperCamelCase : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCamelCase : List[str] = tokenizer.eos_token
UpperCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCamelCase : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , snake_case__ , snake_case__ )] ),
}
# Load evaluation dataset and metric
UpperCamelCase : Dict = load_dataset('openai_humaneval' )
UpperCamelCase : List[str] = load_metric('code_eval' )
UpperCamelCase : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
UpperCamelCase : Union[str, Any] = args.n_samples // args.batch_size
UpperCamelCase : int = TokenizedDataset(snake_case__ , human_eval['test'] , n_copies=snake_case__ , n_tasks=snake_case__ )
# do not confuse args.batch_size, which is actually the num_return_sequences
UpperCamelCase : Tuple = DataLoader(snake_case__ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCamelCase : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
UpperCamelCase , UpperCamelCase : Tuple = accelerator.prepare(snake_case__ , snake_case__ )
UpperCamelCase : List[Any] = complete_code(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , n_tasks=snake_case__ , batch_size=args.batch_size , **snake_case__ , )
if accelerator.is_main_process:
UpperCamelCase : int = []
for task in tqdm(range(snake_case__ ) ):
UpperCamelCase : Tuple = human_eval['test'][task]['test']
UpperCamelCase : str = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
UpperCamelCase , UpperCamelCase : str = code_eval_metric.compute(
references=snake_case__ , predictions=snake_case__ , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(snake_case__ , snake_case__ )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
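# For reference, the pass@k reported by the `code_eval` metric above is the standard
# unbiased estimator: with n generated samples per task and c of them correct,
#   pass@k = E[ 1 - C(n - c, k) / C(n, k) ]
# averaged over tasks; this is the value dumped to the JSON output file.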
| 40 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) )
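# Quick check of the helper above: for the points (0, 0) and (3, 4) it computes
# sqrt(3**2 + 4**2) = 5.0.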
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
UpperCamelCase : int = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCamelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCamelCase : Dict = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(snake_case__ )
UpperCamelCase : List[Any] = []
for value in value_array:
UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] )
UpperCamelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ )
if dist > temp_dist:
UpperCamelCase : str = temp_dist
UpperCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
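# The one-liner above is cos(theta) = (a . b) / (||a|| * ||b||); e.g. the orthogonal
# vectors [1, 0] and [0, 1] give 0.0, while identical nonzero vectors give 1.0.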
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
    UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(i) for i in snake_case__.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
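# Editorial illustration (assumption: the `Csv` builder forwards keyword
# arguments such as `sep` and `converters` to `pandas.read_csv`). The
# converter above turns each raw cell into a list of ints before Arrow
# type inference:
import io

import pandas as pd

demo_df = pd.read_csv(
    io.StringIO("int_list\n1 2 3\n4 5 6\n7 8 9\n"),
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
assert demo_df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]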
| 40 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCAmelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCAmelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCAmelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCAmelCase = np.expand_dims(test_image, axis=0)
__UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__UpperCAmelCase = '''Normal'''
if result[0][0] == 1:
__UpperCAmelCase = '''Abnormality detected'''
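# Editorial note (not in the original script): `fit_generator` is deprecated
# in TensorFlow 2.x; `Model.fit` accepts the same iterator objects directly,
# so a modern equivalent of the training call above would be:
#
#     classifier.fit(
#         training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
#     )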
| 40 | 1 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCAmelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCAmelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCAmelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCAmelCase = np.expand_dims(test_image, axis=0)
__UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__UpperCAmelCase = '''Normal'''
if result[0][0] == 1:
__UpperCAmelCase = '''Abnormality detected'''
| 40 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = '''us-east-1''' # defaults region
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str
UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
UpperCAmelCase__ : Union[str, Any] = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000}
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ) -> str:
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 40 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
__UpperCAmelCase = {'''mgp-str''': 27}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="[GO]", SCREAMING_SNAKE_CASE_="[GO]", SCREAMING_SNAKE_CASE_="[s]", SCREAMING_SNAKE_CASE_="[GO]", **SCREAMING_SNAKE_CASE_ ) -> int:
super().__init__(
unk_token=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
with open(SCREAMING_SNAKE_CASE_, encoding='utf-8' ) as vocab_handle:
UpperCamelCase : Optional[int] = json.load(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = {v: k for k, v in self.vocab.items()}
@property
def snake_case_ ( self ) -> Any:
return len(self.vocab )
def snake_case_ ( self ) -> List[Any]:
return dict(self.vocab, **self.added_tokens_encoder )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : List[Any] = []
for s in text:
char_tokens.extend(SCREAMING_SNAKE_CASE_ )
return char_tokens
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
return self.vocab.get(SCREAMING_SNAKE_CASE_, self.vocab.get(self.unk_token ) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE_ ) )
return
UpperCamelCase : Dict = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(SCREAMING_SNAKE_CASE_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=SCREAMING_SNAKE_CASE_, ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
return (vocab_file,)
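# Editorial sketch (illustrative names, not from the original file): MGP-STR
# tokenizes character by character, mapping unknown symbols to the unk token
# ("[GO]" in the signature above). A minimal stand-alone version:
vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
decoder = {v: k for k, v in vocab.items()}

def char_tokenize(text: str) -> list:
    # one token per character; unknown characters fall back to "[GO]"
    return [vocab.get(ch, vocab["[GO]"]) for ch in text]

assert char_tokenize("abc") == [2, 3, 4]
assert "".join(decoder[i] for i in char_tokenize("abc")) == "abc"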
| 40 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCAmelCase = '''src/transformers'''
__UpperCAmelCase = '''docs/source/en/tasks'''
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
UpperCamelCase : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
UpperCamelCase : Optional[Any] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
UpperCamelCase : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 1 |
import operator as op
def UpperCamelCase ( snake_case__ : Tuple ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : int = lambda snake_case__ , snake_case__ : int(x / y ) # noqa: E731 integer division operation
UpperCamelCase : Dict = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
print('-' * (30 + len(snake_case__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(snake_case__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(snake_case__ ) , sep=' | ' )
else:
UpperCamelCase : Optional[int] = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(snake_case__ ) , sep=' | ' )
UpperCamelCase : Tuple = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(snake_case__ ) , sep=' | ' )
stack.append(
str(opr[x](int(snake_case__ ) , int(snake_case__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(snake_case__ ) , sep=' | ' , )
return int(stack[0] )
if __name__ == "__main__":
__UpperCAmelCase = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
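# Editorial worked example: evaluating the postfix expression "5 6 9 * +"
# with the stack machine above.
#
#   Symbol | Action           | Stack
#   5      | push(5)          | 5
#   6      | push(6)          | 5,6
#   9      | push(9)          | 5,6,9
#   *      | pop(9), pop(6)   | 5
#          | push(6*9 = 54)   | 5,54
#   +      | pop(54), pop(5)  |
#          | push(5+54 = 59)  | 59
#
# Result = 59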
| 40 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : int = IFPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ) -> str:
return self._get_dummy_components()
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def snake_case_ ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_local()
def snake_case_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def snake_case_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
# if
UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa )
UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 40 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( a__ , a__ ):
@register_to_config
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None ) -> Union[str, Any]:
super().__init__()
UpperCamelCase : int = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase : Dict = torch.zeros(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Any = torch.nn.Parameter(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> Dict:
super().__init__()
self.register_modules(
vqvae=SCREAMING_SNAKE_CASE_, transformer=SCREAMING_SNAKE_CASE_, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_, learned_classifier_free_sampling_embeddings=SCREAMING_SNAKE_CASE_, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase : Dict = len(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else 1
# get prompt text embeddings
UpperCamelCase : Union[str, Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
UpperCamelCase : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase : Dict = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase : Union[str, Any] = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=SCREAMING_SNAKE_CASE_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase : str = prompt_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase : Dict = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase : List[str] = negative_prompt_embeds.unsqueeze(0 ).repeat(SCREAMING_SNAKE_CASE_, 1, 1 )
else:
UpperCamelCase : Optional[Any] = [''] * batch_size
UpperCamelCase : Tuple = text_input_ids.shape[-1]
UpperCamelCase : Union[str, Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_, padding='max_length', max_length=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, return_tensors='pt', )
UpperCamelCase : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase : Tuple = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=SCREAMING_SNAKE_CASE_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : str = negative_prompt_embeds.shape[1]
UpperCamelCase : List[Any] = negative_prompt_embeds.repeat(1, SCREAMING_SNAKE_CASE_, 1 )
UpperCamelCase : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt, SCREAMING_SNAKE_CASE_, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : int = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 100, SCREAMING_SNAKE_CASE_ = 5.0, SCREAMING_SNAKE_CASE_ = 1.0, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "pil", SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 1, ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = 1
elif isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}""" )
UpperCamelCase : Optional[Any] = batch_size * num_images_per_prompt
UpperCamelCase : Any = guidance_scale > 1.0
UpperCamelCase : Tuple = self._encode_prompt(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(SCREAMING_SNAKE_CASE_ )}.""" )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase : Any = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase : str = self.transformer.num_vector_embeds - 1
UpperCamelCase : Dict = torch.full(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
UpperCamelCase : int = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_, device=self.device )
UpperCamelCase : Any = self.scheduler.timesteps.to(self.device )
UpperCamelCase : List[Any] = latents
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase : List[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase : List[str] = self.transformer(SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, timestep=SCREAMING_SNAKE_CASE_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : Optional[Any] = model_output.chunk(2 )
UpperCamelCase : Any = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(SCREAMING_SNAKE_CASE_, dim=1, keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = self.truncate(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase : str = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Optional[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE_, timestep=SCREAMING_SNAKE_CASE_, sample=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self.vqvae.config.vq_embed_dim
UpperCamelCase : str = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase : Tuple = self.vqvae.quantize.get_codebook_entry(SCREAMING_SNAKE_CASE_, shape=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self.vqvae.decode(SCREAMING_SNAKE_CASE_, force_not_quantize=SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase : int = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase : Dict = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase : Any = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> torch.FloatTensor:
UpperCamelCase , UpperCamelCase : Optional[int] = torch.sort(SCREAMING_SNAKE_CASE_, 1, descending=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.exp(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase : Union[str, Any] = torch.full_like(keep_mask[:, 0:1, :], SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = torch.cat((all_true, keep_mask), dim=1 )
UpperCamelCase : str = keep_mask[:, :-1, :]
UpperCamelCase : List[str] = keep_mask.gather(1, indices.argsort(1 ) )
UpperCamelCase : str = log_p_x_0.clone()
UpperCamelCase : Dict = -torch.inf # -inf = log(0)
return rv
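# Editorial sketch of the truncation step above (assumed semantics: keep the
# most probable classes whose cumulative probability stays below
# `truncation_rate`, always retain the argmax, and send everything else to
# log(0) = -inf). Simplified here to a 2-D (batch, classes) tensor:
import torch

def truncate_log_probs(log_p: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # shift right by one so the single most probable class is always kept
    keep = torch.cat([torch.ones_like(keep[:, :1]), keep[:, :-1]], dim=1)
    keep = keep.gather(1, indices.argsort(1))  # undo the sort
    out = log_p.clone()
    out[~keep] = float("-inf")
    return out

if __name__ == "__main__":
    lp = torch.log(torch.tensor([[0.5, 0.3, 0.2]]))
    print(truncate_log_probs(lp, 0.7))  # the 0.2 class becomes -inf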
| 40 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase ( snake_case__ : Tuple="" ) -> str:
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) )
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' )
sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 )
UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) )
UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = 'Hey!'
UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Optional[int] = "funnel"
UpperCAmelCase__ : Optional[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self, SCREAMING_SNAKE_CASE_=3_0522, SCREAMING_SNAKE_CASE_=[4, 4, 4], SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu_new", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=1e-9, SCREAMING_SNAKE_CASE_="mean", SCREAMING_SNAKE_CASE_="relative_shift", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Dict:
UpperCamelCase : Dict = vocab_size
UpperCamelCase : str = block_sizes
UpperCamelCase : Optional[Any] = [1] * len(SCREAMING_SNAKE_CASE_ ) if block_repeats is None else block_repeats
assert len(SCREAMING_SNAKE_CASE_ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
UpperCamelCase : Tuple = num_decoder_layers
UpperCamelCase : Union[str, Any] = d_model
UpperCamelCase : int = n_head
UpperCamelCase : Optional[int] = d_head
UpperCamelCase : Optional[int] = d_inner
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : Optional[Any] = hidden_dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : Dict = activation_dropout
UpperCamelCase : str = initializer_range
UpperCamelCase : List[str] = initializer_std
UpperCamelCase : Optional[int] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
UpperCamelCase : Union[str, Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
UpperCamelCase : Any = attention_type
UpperCamelCase : Dict = separate_cls
UpperCamelCase : Dict = truncate_seq
UpperCamelCase : Any = pool_q_only
super().__init__(**SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> List[str]:
return sum(self.block_sizes )
@num_hidden_layers.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Tuple:
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def snake_case_ ( self ) -> List[Any]:
return len(self.block_sizes )
@num_blocks.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
| 40 |
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : int = [1]
for i in range(2 , snake_case__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCamelCase : List[Any] = []
UpperCamelCase : List[Any] = list(range(snake_case__ ) )
# Find permutation
while factorials:
UpperCamelCase : int = factorials.pop()
UpperCamelCase , UpperCamelCase : int = divmod(snake_case__ , snake_case__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
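# Editorial worked example: the function above decodes k in the factorial
# number system. For n = 3, k = 4: factorials = [1, 2], and 4 = 2*2! + 0*1!,
# so it picks elements[2] -> 2, then elements[0] -> 0, then the last remaining
# element 1, yielding the permutation [2, 0, 1].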
| 40 | 1 |
__UpperCAmelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def UpperCamelCase ( ) -> None:
UpperCamelCase : str = input('Enter message: ' )
UpperCamelCase : Optional[int] = input('Enter key [alphanumeric]: ' )
UpperCamelCase : Optional[int] = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
UpperCamelCase : Optional[Any] = 'encrypt'
UpperCamelCase : List[Any] = encrypt_message(snake_case__ , snake_case__ )
elif mode.lower().startswith('d' ):
UpperCamelCase : Tuple = 'decrypt'
UpperCamelCase : Optional[int] = decrypt_message(snake_case__ , snake_case__ )
print(F"""\n{mode.title()}ed message:""" )
print(snake_case__ )
def UpperCamelCase ( snake_case__ : str , snake_case__ : str ) -> str:
return translate_message(snake_case__ , snake_case__ , 'encrypt' )
def UpperCamelCase ( snake_case__ : str , snake_case__ : str ) -> str:
return translate_message(snake_case__ , snake_case__ , 'decrypt' )
def UpperCamelCase ( snake_case__ : str , snake_case__ : str , snake_case__ : str ) -> str:
UpperCamelCase : Any = []
UpperCamelCase : List[Any] = 0
UpperCamelCase : Optional[int] = key.upper()
for symbol in message:
UpperCamelCase : List[Any] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case__ ):
UpperCamelCase : Tuple = 0
else:
translated.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
main()
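# Editorial round-trip sketch (an uppercase-only, illustrative stand-in for
# the obfuscated helpers above), using the classic Vigenere test vector with
# key "LEMON":
def vigenere(message: str, key: str, sign: int) -> str:
    out, ki = [], 0
    for ch in message:
        if ch.isalpha():
            shift = ord(key[ki % len(key)]) - ord("A")
            out.append(chr((ord(ch) - ord("A") + sign * shift) % 26 + ord("A")))
            ki += 1
        else:
            out.append(ch)
    return "".join(out)

assert vigenere("ATTACKATDAWN", "LEMON", +1) == "LXFOPVEFRNHR"
assert vigenere("LXFOPVEFRNHR", "LEMON", -1) == "ATTACKATDAWN"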
| 40 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( a__ ):
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any:
UpperCamelCase : int = parent
UpperCamelCase : int = batch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : List[str] = patch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 )
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : Optional[int] = conv_kernel_size
UpperCamelCase : List[str] = output_stride
UpperCamelCase : Union[str, Any] = classifier_dropout_prob
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Any = is_training
UpperCamelCase : int = num_labels
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Tuple = scope
UpperCamelCase : List[str] = width_multiplier
UpperCamelCase : Any = ffn_dropout
UpperCamelCase : List[Any] = attn_dropout
def snake_case_ ( self ) -> int:
UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[str] = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MobileViTVaModelTester(self )
UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase : Any = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Tuple:
UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = outputs.logits.detach().cpu()
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCamelCase : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
| 40 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase ( snake_case__ : str , snake_case__ : float | Decimal , snake_case__ : float = 10**-10 ) -> float:
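    """
    Find a root of the given expression string (e.g. "x**2 - 5*x + 2") with the
    Newton-Raphson iteration x_{n+1} = x_n - f(x_n) / f'(x_n), starting from the
    supplied guess and stopping once |f(x)| drops below the precision threshold.
    """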
UpperCamelCase : Tuple = a
while True:
UpperCamelCase : Any = Decimal(snake_case__ ) - (
Decimal(eval(snake_case__ ) ) / Decimal(eval(str(diff(snake_case__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case__ ) ) < precision: # noqa: S307
return float(snake_case__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find root of log(x) - 1 = 0 (the root is e)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 40 |
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str:
UpperCamelCase : List[str] = [0] * len(snake_case__ )
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
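# For the DAG above, the longest path visits 5 vertices (e.g. 0 -> 2 -> 5 -> 6 -> 7),
# so this prints 5; distances here count vertices on the path, not edges.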
| 40 | 1 |
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : List[Any] = order
# a_{0} ... a_{k}
UpperCamelCase : Tuple = [1.0] + [0.0] * order
# b_{0} ... b_{k}
UpperCamelCase : List[Any] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
UpperCamelCase : Optional[int] = [0.0] * self.order
# y[n-1] ... y[n-k]
UpperCamelCase : Optional[int] = [0.0] * self.order
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
if len(SCREAMING_SNAKE_CASE_ ) < self.order:
UpperCamelCase : List[Any] = [1.0, *a_coeffs]
if len(SCREAMING_SNAKE_CASE_ ) != self.order + 1:
UpperCamelCase : Any = (
F"""Expected a_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE_ )}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) != self.order + 1:
UpperCamelCase : Optional[Any] = (
F"""Expected b_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE_ )}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = a_coeffs
UpperCamelCase : List[str] = b_coeffs
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> float:
UpperCamelCase : Optional[Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1, self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
UpperCamelCase : List[Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
UpperCamelCase : Tuple = self.input_history[:-1]
UpperCamelCase : Optional[Any] = self.output_history[:-1]
UpperCamelCase : Optional[int] = sample
UpperCamelCase : Dict = result
return result
| 40 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def UpperCamelCase ( snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Union[str, Any]=8 ) -> Optional[int]:
UpperCamelCase : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCamelCase : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
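# Example: with the default scale_factor=8, a requested 768x768 image maps to
# 96x96 (768 // 8**2 = 12 blocks, times the factor of 8); sizes that are not a
# multiple of scale_factor**2 are rounded up to the next whole block.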
def UpperCamelCase ( snake_case__ : Any , snake_case__ : int=512 , snake_case__ : List[str]=512 ) -> Dict:
UpperCamelCase : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCamelCase : str = np.array(pil_image.convert('RGB' ) )
UpperCamelCase : Dict = arr.astype(np.floataa ) / 127.5 - 1
UpperCamelCase : int = np.transpose(snake_case__ , [2, 0, 1] )
UpperCamelCase : Optional[Any] = torch.from_numpy(snake_case__ ).unsqueeze(0 )
return image
class lowerCAmelCase_ ( a__ ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> Optional[Any]:
super().__init__()
self.register_modules(
unet=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_, movq=SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
# get the original timestep using init_timestep
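        # e.g. num_inference_steps=100 with strength=0.3 gives init_timestep=30,
        # so the first 70 scheduler timesteps are skipped and 30 denoising steps remain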
UpperCamelCase : str = min(int(num_inference_steps * strength ), SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = max(num_inference_steps - init_timestep, 0 )
UpperCamelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
if not isinstance(SCREAMING_SNAKE_CASE_, (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE_ )}""" )
UpperCamelCase : Tuple = image.to(device=SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCamelCase : int = image
else:
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE_ )
]
UpperCamelCase : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE_, dim=0 )
else:
UpperCamelCase : Optional[Any] = self.movq.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self.movq.config.scaling_factor * init_latents
UpperCamelCase : Optional[int] = torch.cat([init_latents], dim=0 )
UpperCamelCase : int = init_latents.shape
UpperCamelCase : Optional[int] = randn_tensor(SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, device=SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_ )
# get latents
UpperCamelCase : Optional[int] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = init_latents
return latents
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=0 ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCamelCase : Dict = torch.device(F"""cuda:{gpu_id}""" )
UpperCamelCase : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=0 ) -> Any:
if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCamelCase : Optional[Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu', silence_dtype_warnings=SCREAMING_SNAKE_CASE_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCamelCase , UpperCamelCase : Any = cpu_offload_with_hook(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, prev_module_hook=SCREAMING_SNAKE_CASE_ )
# We'll offload the last model manually.
UpperCamelCase : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_ ( self ) -> Optional[Any]:
if not hasattr(self.unet, '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE_, '_hf_hook' )
and hasattr(module._hf_hook, 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE_ )
def __call__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 512, SCREAMING_SNAKE_CASE_ = 512, SCREAMING_SNAKE_CASE_ = 100, SCREAMING_SNAKE_CASE_ = 4.0, SCREAMING_SNAKE_CASE_ = 0.3, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "pil", SCREAMING_SNAKE_CASE_ = True, ) -> Optional[Any]:
UpperCamelCase : str = self._execution_device
UpperCamelCase : Union[str, Any] = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE_, dim=0 )
UpperCamelCase : int = image_embeds.shape[0]
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE_, dim=0 )
if do_classifier_free_guidance:
UpperCamelCase : List[str] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0 )
UpperCamelCase : Optional[Any] = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0 )
UpperCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = [image]
if not all(isinstance(SCREAMING_SNAKE_CASE_, (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(SCREAMING_SNAKE_CASE_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
UpperCamelCase : Any = torch.cat([prepare_image(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) for i in image], dim=0 )
UpperCamelCase : List[str] = image.to(dtype=image_embeds.dtype, device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = self.movq.encode(SCREAMING_SNAKE_CASE_ )['latents']
UpperCamelCase : List[Any] = latents.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0 )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_, device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase : str = self.get_timesteps(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCamelCase , UpperCamelCase : Union[str, Any] = downscale_height_and_width(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.movq_scale_factor )
UpperCamelCase : Optional[int] = self.prepare_latents(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, image_embeds.dtype, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : List[Any] = {'image_embeds': image_embeds}
UpperCamelCase : Optional[Any] = self.unet(
sample=SCREAMING_SNAKE_CASE_, timestep=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, added_cond_kwargs=SCREAMING_SNAKE_CASE_, return_dict=SCREAMING_SNAKE_CASE_, )[0]
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : Tuple = noise_pred.split(latents.shape[1], dim=1 )
UpperCamelCase , UpperCamelCase : str = noise_pred.chunk(2 )
UpperCamelCase , UpperCamelCase : Any = variance_pred.chunk(2 )
UpperCamelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text], dim=1 )
if not (
hasattr(self.scheduler.config, 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase , UpperCamelCase : Any = noise_pred.split(latents.shape[1], dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Dict = self.scheduler.step(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, )[0]
# post-processing
UpperCamelCase : int = self.movq.decode(SCREAMING_SNAKE_CASE_, force_not_quantize=SCREAMING_SNAKE_CASE_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCamelCase : Dict = image * 0.5 + 0.5
UpperCamelCase : Optional[Any] = image.clamp(0, 1 )
UpperCamelCase : str = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 40 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=None, ) -> Tuple:
UpperCamelCase : int = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : Union[str, Any] = seq_length
UpperCamelCase : Optional[Any] = is_training
UpperCamelCase : Tuple = use_input_mask
UpperCamelCase : Optional[Any] = use_labels
UpperCamelCase : List[Any] = vocab_size
UpperCamelCase : Optional[Any] = hidden_size
UpperCamelCase : int = projection_dim
UpperCamelCase : str = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : int = intermediate_size
UpperCamelCase : Optional[int] = dropout
UpperCamelCase : Union[str, Any] = attention_dropout
UpperCamelCase : Any = max_position_embeddings
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Tuple = scope
UpperCamelCase : Union[str, Any] = bos_token_id
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase : int = None
if self.use_input_mask:
UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCamelCase : int = input_mask.numpy()
UpperCamelCase , UpperCamelCase : Union[str, Any] = input_mask.shape
UpperCamelCase : Optional[int] = np.random.randint(1, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = 1
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Tuple = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
return BlipTextConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : int = TFBlipTextModel(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, training=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = model(SCREAMING_SNAKE_CASE_, training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : List[Any] = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Dict = False
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Union[str, Any] = BlipTextModelTester(self )
UpperCamelCase : List[str] = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def snake_case_ ( self ) -> str:
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> str:
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
pass
def snake_case_ ( self ) -> str:
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def snake_case_ ( self ) -> int:
pass
@slow
def snake_case_ ( self ) -> Tuple:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[Any] = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=True ) -> int:
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE_ )
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
def UpperCamelCase ( snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> float:
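    """
    Sum of the first n terms of an arithmetic series with first term a and
    common difference d: S_n = n / 2 * (2 * a + (n - 1) * d).

    >>> sum_of_series(1, 1, 10)
    55.0
    """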
UpperCamelCase : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series: S_n = n / 2 * (2 * a + (n - 1) * d)
return total
def UpperCamelCase ( ) -> Any:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Any = inspect.getfile(accelerate.test_utils )
UpperCamelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
UpperCamelCase : List[str] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def snake_case_ ( self ) -> Dict:
UpperCamelCase : List[str] = F"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
UpperCamelCase : Any = [sys.executable] + distributed_args
execute_subprocess_async(SCREAMING_SNAKE_CASE_, env=os.environ.copy() )
| 40 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
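    # Builds a shape[0] x shape[1] nested list of floats in [0, scale), drawing
    # from the shared module-level RNG unless an explicit rng is passed in.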
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[1][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCAmelCase_ ( a__ ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> str:
super().__init__(
features=SCREAMING_SNAKE_CASE_, cache_dir=SCREAMING_SNAKE_CASE_, keep_in_memory=SCREAMING_SNAKE_CASE_, streaming=SCREAMING_SNAKE_CASE_, num_proc=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = Generator(
cache_dir=SCREAMING_SNAKE_CASE_, features=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, gen_kwargs=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
def snake_case_ ( self ) -> Union[str, Any]:
# Build iterable dataset
if self.streaming:
UpperCamelCase : Tuple = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
UpperCamelCase : List[Any] = None
UpperCamelCase : Tuple = None
UpperCamelCase : Any = None
UpperCamelCase : List[str] = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE_, download_mode=SCREAMING_SNAKE_CASE_, verification_mode=SCREAMING_SNAKE_CASE_, base_path=SCREAMING_SNAKE_CASE_, num_proc=self.num_proc, )
UpperCamelCase : List[str] = self.builder.as_dataset(
split='train', verification_mode=SCREAMING_SNAKE_CASE_, in_memory=self.keep_in_memory )
return dataset
| 40 |
def UpperCamelCase ( snake_case__ : int ) -> str:
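    """
    Convert an integer to its binary representation as a string with a '0b'
    prefix ('-0b' for negative inputs), e.g. 11 -> '0b1011' and -5 -> '-0b101'.
    """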
    if isinstance(snake_case__ , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(snake_case__ , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
def UpperCamelCase ( snake_case__ : int ) -> int:
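    # Counts the right-shifts needed to reach zero, i.e. the 1-based position
    # of the most significant set bit (0 for an input of 0); this matches
    # int.bit_length() for non-negative integers. Negative inputs are not
    # handled and would loop forever, since right-shifting a negative int
    # never reaches zero.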
    if not isinstance(snake_case__ , int ):
raise TypeError('Input value must be an \'int\' type' )
UpperCamelCase : Optional[int] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
        UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
if value.dtype == torch.inta:
UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
            if is_8bit:
                UpperCamelCase : Optional[Any] = bnb.nn.Int8Params(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
            elif is_4bit:
                UpperCamelCase : Optional[Any] = bnb.nn.Params4bit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
UpperCamelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
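    # Recursively walks the module tree and swaps every nn.Linear (or Conv1D)
    # not listed in modules_to_not_convert for the matching bitsandbytes 8-bit
    # or 4-bit linear layer, returning the model and a has_been_replaced flag.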
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase : Any = bnb.nn.LinearabitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase : str = bnb.nn.Linearabit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
| 40 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = ['''model.decoder.embed_positions.weights''']
def UpperCamelCase ( snake_case__ : int ) -> Tuple:
if "emb" in name:
UpperCamelCase : Any = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
UpperCamelCase : Optional[Any] = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
UpperCamelCase : Dict = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
UpperCamelCase : Optional[Any] = name.replace('linear1' , 'fc1' )
if "linear2" in name:
UpperCamelCase : List[Any] = name.replace('linear2' , 'fc2' )
if "norm1" in name:
UpperCamelCase : Any = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
UpperCamelCase : List[str] = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
UpperCamelCase : int = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
UpperCamelCase : Dict = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
UpperCamelCase : List[Any] = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCamelCase : List[str] = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def UpperCamelCase ( snake_case__ : OrderedDict , snake_case__ : int ) -> Tuple[Dict, Dict]:
UpperCamelCase : int = list(state_dict.keys() )
UpperCamelCase : Dict = {}
for key in keys:
UpperCamelCase : List[str] = state_dict.pop(snake_case__ )
UpperCamelCase : str = rename_keys(snake_case__ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCamelCase : Any = val[:hidden_size, :]
UpperCamelCase : Tuple = val[hidden_size : 2 * hidden_size, :]
UpperCamelCase : Optional[int] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCamelCase : Union[str, Any] = val
else:
UpperCamelCase : Optional[Any] = val
return state_dict, enc_dec_proj_state_dict
def UpperCamelCase ( snake_case__ : str ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
UpperCamelCase : Tuple = 1024
UpperCamelCase : Union[str, Any] = 24
UpperCamelCase : Any = 16
elif checkpoint == "medium":
UpperCamelCase : str = 1536
UpperCamelCase : List[str] = 48
UpperCamelCase : Optional[Any] = 24
elif checkpoint == "large":
UpperCamelCase : Tuple = 2048
UpperCamelCase : Dict = 48
UpperCamelCase : List[str] = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
UpperCamelCase : Union[str, Any] = MusicgenDecoderConfig(
hidden_size=snake_case__ , ffn_dim=hidden_size * 4 , num_hidden_layers=snake_case__ , num_attention_heads=snake_case__ , )
return config
@torch.no_grad()
def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Dict=None , snake_case__ : Tuple=None , snake_case__ : Tuple="cpu" ) -> List[str]:
UpperCamelCase : List[str] = MusicGen.get_pretrained(snake_case__ , device=snake_case__ )
UpperCamelCase : Any = decoder_config_from_checkpoint(snake_case__ )
UpperCamelCase : int = fairseq_model.lm.state_dict()
UpperCamelCase , UpperCamelCase : List[str] = rename_state_dict(
snake_case__ , hidden_size=decoder_config.hidden_size )
    UpperCamelCase : Optional[int] = T5EncoderModel.from_pretrained('t5-base' )
UpperCamelCase : int = EncodecModel.from_pretrained('facebook/encodec_32khz' )
UpperCamelCase : List[Any] = MusicgenForCausalLM(snake_case__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCamelCase , UpperCamelCase : Optional[int] = decoder.load_state_dict(snake_case__ , strict=snake_case__ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(snake_case__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
UpperCamelCase : int = MusicgenForConditionalGeneration(text_encoder=snake_case__ , audio_encoder=snake_case__ , decoder=snake_case__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(snake_case__ )
# check we can do a forward pass
UpperCamelCase : int = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCamelCase : str = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(input_ids=snake_case__ , decoder_input_ids=snake_case__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
UpperCamelCase : List[str] = AutoTokenizer.from_pretrained('t5-base' )
UpperCamelCase : Any = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
UpperCamelCase : List[Any] = MusicgenProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
# set the appropriate bos/pad token ids
UpperCamelCase : Any = 2048
UpperCamelCase : Any = 2048
# set other default generation config params
UpperCamelCase : Dict = int(30 * audio_encoder.config.frame_rate )
UpperCamelCase : str = True
UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(snake_case__ )
processor.push_to_hub(snake_case__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
__UpperCAmelCase = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
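
# Example invocation (the script filename and output directory are placeholders):
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu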
| 40 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
    UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x: [int(i) for i in x.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 40 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
__UpperCAmelCase = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
__UpperCAmelCase = '''▁'''
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : int = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCamelCase : str = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else mask_token
UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Any = vocab_file
UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : int = len(self.sp_model ) - 1
UpperCamelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
UpperCamelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Tuple = [self.sep_token_id]
UpperCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self ) -> List[str]:
return len(self.sp_model )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : str = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
return spm_id if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase : str = []
UpperCamelCase : Optional[Any] = ''
UpperCamelCase : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
UpperCamelCase : Optional[int] = True
UpperCamelCase : Optional[int] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
def __getstate__( self ) -> str:
UpperCamelCase : Dict = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Dict = {}
UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : List[Any] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : int = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 40 |
import math
import random
def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
__UpperCAmelCase = 0.02
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCamelCase : int = (expected / 100) - layer_a
# Error delta
UpperCamelCase : List[str] = layer_1_error * sigmoid_function(snake_case__ , snake_case__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
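
# The same loop with conventional names (all names below are mine, not the
# script's): one weight, a sigmoid forward pass, and a delta-rule update.
def train_single_weight(expected: int, steps: int, x: float = 0.02) -> float:
    weight = float(2 * random.randint(1, 100) - 1)
    out = 0.0
    for _ in range(steps):
        out = 1 / (1 + math.exp(-(x * weight)))  # forward pass
        error = expected / 100 - out             # how much we missed
        delta = error * out * (1 - out)          # gradient through the sigmoid
        weight += x * delta                      # update the single weight
    return out * 100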
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input('''Expected value: '''))
__UpperCAmelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : int = BlenderbotSmallTokenizer
UpperCAmelCase__ : Any = False
def snake_case_ ( self ) -> Any:
super().setUp()
UpperCamelCase : Dict = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
UpperCamelCase : int = dict(zip(SCREAMING_SNAKE_CASE_, range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase : List[str] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
UpperCamelCase : int = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
UpperCamelCase : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : List[str] = 'adapt act apte'
UpperCamelCase : List[Any] = 'adapt act apte'
return input_text, output_text
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Union[str, Any] = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase : List[Any] = 'adapt act apte'
UpperCamelCase : List[Any] = ['adapt', 'act', 'ap@@', 'te']
UpperCamelCase : Optional[int] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCamelCase : Union[str, Any] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
UpperCamelCase : List[str] = 'I am a small frog.'
UpperCamelCase : Optional[Any] = tok([src_text], padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_ )['input_ids']
UpperCamelCase : int = tok.batch_decode(SCREAMING_SNAKE_CASE_, skip_special_tokens=SCREAMING_SNAKE_CASE_, clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
UpperCamelCase : Tuple = 'I am a small frog .'
UpperCamelCase : List[str] = '.'
UpperCamelCase : str = tok(SCREAMING_SNAKE_CASE_ )['input_ids']
UpperCamelCase : List[Any] = tok(SCREAMING_SNAKE_CASE_ )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 40 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]:
return EnvironmentCommand()
class lowerCAmelCase_ ( a__ ):
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = huggingface_hub.__version__
UpperCamelCase : int = 'not installed'
UpperCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
UpperCamelCase : Any = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase : str = transformers.__version__
UpperCamelCase : Optional[Any] = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase : Dict = accelerate.__version__
UpperCamelCase : List[str] = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase : List[str] = xformers.__version__
UpperCamelCase : Dict = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE_ ) )
return info
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCAmelCase = {
'''facebook/xglm-564M''': 2_048,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase : Any = 7
UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : Optional[int] = len(self.sp_model )
UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
UpperCamelCase : int = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Any = {}
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase : Optional[int] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case_ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip()
return out_string
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 40 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
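
# A usage sketch following the documented example (treat the exact arguments
# as an approximation; "openai/shap-e" is the published checkpoint id):
#   from diffusers import ShapEPipeline
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#   images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images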
| 40 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Dict = RobertaTokenizer
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) )
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = add_prefix_space
UpperCamelCase : Optional[Any] = 'post_processor'
UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : Optional[Any] = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase : Optional[int] = tuple(state['cls'] )
UpperCamelCase : Any = False
if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Optional[int] = add_prefix_space
UpperCamelCase : List[Any] = True
if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCamelCase : Dict = trim_offsets
UpperCamelCase : Union[str, Any] = True
if changes_to_apply:
UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) )
UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
UpperCamelCase : List[Any] = value
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple:
UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
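
# Usage sketch, assuming the class is registered as `RobertaTokenizerFast`
# (its slow counterpart is set as the class attribute above):
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   tok("Hello world").input_ids  # wrapped in <s> ... </s> by the template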
| 40 | 1 |
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str:
UpperCamelCase : List[str] = [0] * len(snake_case__ )
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
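
# A compact restatement with explicit names (mine, not the script's): Kahn's
# topological order lets each edge be relaxed exactly once, so the longest
# path in a DAG (counted in vertices here) comes out in O(V + E).
def longest_distance_clean(dag: dict[int, list[int]]) -> int:
    indegree = {v: 0 for v in dag}
    for targets in dag.values():
        for t in targets:
            indegree[t] += 1
    queue = [v for v, d in indegree.items() if d == 0]
    dist = {v: 1 for v in dag}
    while queue:
        v = queue.pop(0)
        for t in dag[v]:
            dist[t] = max(dist[t], dist[v] + 1)
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return max(dist.values())

example_dag = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
assert longest_distance_clean(example_dag) == 5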
| 40 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
        if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(None )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
            UpperCamelCase : List[str] = {'dtype': torch.int64}
elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
            UpperCamelCase : int = {'dtype': torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
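
# In `datasets`, this formatter is what backs `Dataset.with_format("torch")`:
#   ds = load_dataset("glue", "mrpc", split="train").with_format("torch")
#   ds[0]  # the row goes through the extract/decode/tensorize path above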
| 40 | 1 |
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( a__ ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> List[str]:
super().__init__()
UpperCamelCase : str = value_function
UpperCamelCase : Tuple = unet
UpperCamelCase : Dict = scheduler
UpperCamelCase : Dict = env
UpperCamelCase : List[str] = env.get_dataset()
UpperCamelCase : Optional[int] = {}
for key in self.data.keys():
try:
UpperCamelCase : Dict = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase : Optional[Any] = {}
for key in self.data.keys():
try:
UpperCamelCase : Tuple = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase : Optional[int] = env.observation_space.shape[0]
UpperCamelCase : List[str] = env.action_space.shape[0]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str:
return (x_in - self.means[key]) / self.stds[key]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
return x_in * self.stds[key] + self.means[key]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
if type(SCREAMING_SNAKE_CASE_ ) is dict:
return {k: self.to_torch(SCREAMING_SNAKE_CASE_ ) for k, v in x_in.items()}
elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ):
return x_in.to(self.unet.device )
return torch.tensor(SCREAMING_SNAKE_CASE_, device=self.unet.device )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
for key, val in cond.items():
UpperCamelCase : List[Any] = val.clone()
return x_in
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : str = x.shape[0]
UpperCamelCase : str = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase : Optional[Any] = torch.full((batch_size,), SCREAMING_SNAKE_CASE_, device=self.unet.device, dtype=torch.long )
for _ in range(SCREAMING_SNAKE_CASE_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase : List[str] = self.value_function(x.permute(0, 2, 1 ), SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase : Optional[int] = torch.autograd.grad([y.sum()], [x] )[0]
UpperCamelCase : Optional[int] = self.scheduler._get_variance(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.exp(0.5 * posterior_variance )
UpperCamelCase : Optional[Any] = model_std * grad
UpperCamelCase : List[Any] = 0
UpperCamelCase : str = x.detach()
UpperCamelCase : Dict = x + scale * grad
UpperCamelCase : Optional[int] = self.reset_xa(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.action_dim )
UpperCamelCase : Dict = self.unet(x.permute(0, 2, 1 ), SCREAMING_SNAKE_CASE_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, predict_epsilon=SCREAMING_SNAKE_CASE_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
UpperCamelCase : str = self.reset_xa(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.action_dim )
UpperCamelCase : int = self.to_torch(SCREAMING_SNAKE_CASE_ )
return x, y
def __call__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.1 ) -> Dict:
# normalize the observations and create batch dimension
UpperCamelCase : Union[str, Any] = self.normalize(SCREAMING_SNAKE_CASE_, 'observations' )
UpperCamelCase : int = obs[None].repeat(SCREAMING_SNAKE_CASE_, axis=0 )
UpperCamelCase : List[str] = {0: self.to_torch(SCREAMING_SNAKE_CASE_ )}
UpperCamelCase : str = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase : Union[str, Any] = randn_tensor(SCREAMING_SNAKE_CASE_, device=self.unet.device )
UpperCamelCase : Dict = self.reset_xa(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.action_dim )
UpperCamelCase : List[Any] = self.to_torch(SCREAMING_SNAKE_CASE_ )
# run the diffusion process
UpperCamelCase , UpperCamelCase : Optional[Any] = self.run_diffusion(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# sort output trajectories by value
UpperCamelCase : Union[str, Any] = y.argsort(0, descending=SCREAMING_SNAKE_CASE_ ).squeeze()
UpperCamelCase : Union[str, Any] = x[sorted_idx]
UpperCamelCase : List[Any] = sorted_values[:, :, : self.action_dim]
UpperCamelCase : Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase : Union[str, Any] = self.de_normalize(SCREAMING_SNAKE_CASE_, key='actions' )
# select the action with the highest value
if y is not None:
UpperCamelCase : List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase : List[Any] = np.random.randint(0, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = denorm_actions[selected_index, 0]
return denorm_actions
| 40 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
UpperCamelCase : int = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCamelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCamelCase : Dict = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(snake_case__ )
UpperCamelCase : List[Any] = []
for value in value_array:
UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] )
UpperCamelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ )
if dist > temp_dist:
UpperCamelCase : str = temp_dist
UpperCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
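
# Hand-checked example for the helpers above (`euclidean` is the name the call
# sites use; dataset/value shapes must agree, as the checks enforce):
#   dataset = np.array([[0, 0], [1, 1], [2, 2]])
#   value = np.array([[0, 1]])
#   # nearest vector is [0, 0] at distance 1.0 -> [[[0, 0], 1.0]]
#   # cosine similarity of [1, 0] with [0, 1] is 0.0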
| 40 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__UpperCAmelCase = '''src/diffusers'''
__UpperCAmelCase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
__UpperCAmelCase = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
__UpperCAmelCase = spec.loader.load_module()
def UpperCamelCase ( snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ) -> Union[str, Any]:
return line.startswith(snake_case__ ) or len(snake_case__ ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , snake_case__ ) is not None
def UpperCamelCase ( snake_case__ : str ) -> Optional[int]:
UpperCamelCase : Optional[Any] = object_name.split('.' )
UpperCamelCase : Optional[int] = 0
# First let's find the module where our object lives.
UpperCamelCase : List[Any] = parts[i]
while i < len(snake_case__ ) and not os.path.isfile(os.path.join(snake_case__ , F"""{module}.py""" ) ):
i += 1
if i < len(snake_case__ ):
UpperCamelCase : Tuple = os.path.join(snake_case__ , parts[i] )
if i >= len(snake_case__ ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(snake_case__ , F"""{module}.py""" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : str = f.readlines()
# Now let's find the class / func in the code!
UpperCamelCase : Optional[int] = ''
UpperCamelCase : str = 0
for name in parts[i + 1 :]:
while (
line_index < len(snake_case__ ) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(snake_case__ ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCamelCase : Optional[int] = line_index
while line_index < len(snake_case__ ) and _should_continue(lines[line_index] , snake_case__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCamelCase : int = lines[start_index:line_index]
return "".join(snake_case__ )
__UpperCAmelCase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
__UpperCAmelCase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
__UpperCAmelCase = re.compile(r'''<FILL\s+[^>]*>''')
def UpperCamelCase ( snake_case__ : str ) -> Tuple:
UpperCamelCase : int = code.split('\n' )
UpperCamelCase : Any = 0
while idx < len(snake_case__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(snake_case__ ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def UpperCamelCase ( snake_case__ : Dict ) -> Union[str, Any]:
UpperCamelCase : Union[str, Any] = len(get_indent(snake_case__ ) ) > 0
if has_indent:
UpperCamelCase : int = F"""class Bla:\n{code}"""
    UpperCamelCase : Optional[int] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=snake_case__ )
UpperCamelCase : List[str] = black.format_str(snake_case__ , mode=snake_case__ )
UpperCamelCase , UpperCamelCase : Tuple = style_docstrings_in_code(snake_case__ )
return result[len('class Bla:\n' ) :] if has_indent else result
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int]=False ) -> Optional[Any]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Union[str, Any] = f.readlines()
UpperCamelCase : Dict = []
UpperCamelCase : Optional[int] = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(snake_case__ ):
UpperCamelCase : str = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = search.groups()
UpperCamelCase : Any = find_code_in_diffusers(snake_case__ )
UpperCamelCase : str = get_indent(snake_case__ )
UpperCamelCase : List[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCamelCase : Tuple = theoretical_indent
UpperCamelCase : str = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCamelCase : Union[str, Any] = True
while line_index < len(snake_case__ ) and should_continue:
line_index += 1
if line_index >= len(snake_case__ ):
break
UpperCamelCase : Optional[Any] = lines[line_index]
UpperCamelCase : str = _should_continue(snake_case__ , snake_case__ ) and re.search(F"""^{indent}# End copy""" , snake_case__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCamelCase : int = lines[start_index:line_index]
UpperCamelCase : int = ''.join(snake_case__ )
# Remove any nested `Copied from` comments to avoid circular copies
UpperCamelCase : List[str] = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(snake_case__ ) is None]
UpperCamelCase : List[str] = '\n'.join(snake_case__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(snake_case__ ) > 0:
UpperCamelCase : List[Any] = replace_pattern.replace('with' , '' ).split(',' )
UpperCamelCase : Optional[Any] = [_re_replace_pattern.search(snake_case__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = pattern.groups()
UpperCamelCase : Optional[int] = re.sub(snake_case__ , snake_case__ , snake_case__ )
if option.strip() == "all-casing":
UpperCamelCase : str = re.sub(obja.lower() , obja.lower() , snake_case__ )
UpperCamelCase : Dict = re.sub(obja.upper() , obja.upper() , snake_case__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCamelCase : Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code )
UpperCamelCase : Dict = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
UpperCamelCase : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCamelCase : List[str] = start_index + 1
if overwrite and len(snake_case__ ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(snake_case__ )
return diffs
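# Illustration only (not executed): the comment shape this checker parses, assuming the
# upstream `# Copied from` convention with an optional replace pattern. The object path
# and class name below are hypothetical examples:
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#   class MyBlock(nn.Module):
#       ...
#
# The trailing `with old->new` (optionally suffixed by `all-casing`) feeds `replace_pattern`
# above before the observed and theoretical code are compared.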
def check_copies(overwrite: bool = False):
    # `DIFFUSERS_PATH` is defined earlier in this script, alongside the regex helpers.
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 40 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # `fit_generator` was removed in recent TensorFlow releases; `fit` accepts generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
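    # Note: `predict` on a single sigmoid unit returns a probability in [0, 1], so the exact
    # comparisons against 0 and 1 above almost never fire. A thresholded check would look
    # like this (sketch; the 0.5 cutoff is an assumption, not part of the original script):
    #
    #   prediction = "Abnormality detected" if result[0][0] > 0.5 else "Normal"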
| 40 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
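# Note: Shap-E pipelines return a stack of rendered views per input, which is why the
# assertions above expect (20, frame_size, frame_size, 3)-shaped outputs.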
| 40 |
import os
import pytest
from attr import dataclass
AWS_DEFAULT_REGION = "us-east-1"  # defaults region (constant name assumed; only used locally)
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"
    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"
    @property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
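# Usage sketch (assumed wiring; fixture and attribute names as defined above, test class
# name is illustrative):
#
#   @pytest.mark.usefixtures("sm_env")
#   class PyTorchSageMakerTest(unittest.TestCase):
#       framework = "pytorch"  # read by the fixture through `request.cls.framework`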
| 40 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min() )
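    # For reference: a temperature warper rescales logits to logits / T before softmax, so
    # T < 1 sharpens peaks and T > 1 flattens them, which is exactly the ordering asserted above.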
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 40 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22050, time_duration=5.0, frequency=220, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
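# Minimal usage sketch (class names as restored above; output follows the defaults):
#
#   config = WhisperConfig()
#   onnx_config = WhisperOnnxConfig(config)
#   print(onnx_config.inputs)               # OrderedDict starting with "input_features"
#   print(onnx_config.atol_for_validation)  # 1e-3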
| 40 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
    def test_save_load_float16(self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 40 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
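# Example (sketch): building the MRPC dataloaders on a fresh Accelerator instance.
#
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased")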
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references, )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : Union[str, Any] ) -> Optional[int]:
# Initialize accelerator
UpperCamelCase : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase : Optional[int] = config['lr']
UpperCamelCase : str = int(config['num_epochs'] )
UpperCamelCase : Tuple = int(config['seed'] )
UpperCamelCase : List[str] = int(config['batch_size'] )
UpperCamelCase : Optional[Any] = args.model_name_or_path
set_seed(snake_case__ )
UpperCamelCase , UpperCamelCase : Optional[Any] = get_dataloaders(snake_case__ , snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase : int = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
UpperCamelCase : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase : int = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
UpperCamelCase : Optional[int] = 1
UpperCamelCase : Optional[Any] = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase : List[str] = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
UpperCamelCase : List[str] = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase : Dict = 0
UpperCamelCase : Union[str, Any] = evaluate.load('glue' , 'mrpc' )
UpperCamelCase : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase : Tuple = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase : Any = args.resume_from_checkpoint.split('epoch_' )[1]
UpperCamelCase : str = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase : Dict = int(snake_case__ ) + 1
UpperCamelCase : str = evaluation_loop(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
accelerator.print('resumed checkpoint performance:' , snake_case__ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , 'r' ) as f:
UpperCamelCase : Dict = json.load(snake_case__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase : Union[str, Any] = {}
for epoch in range(snake_case__ , snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
UpperCamelCase : Optional[Any] = model(**snake_case__ )
UpperCamelCase : List[Any] = outputs.loss
UpperCamelCase : Dict = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
        output_dir = F"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir, output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric )
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(F"""epoch {epoch}:""", state )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , 'w' ) as f:
                json.dump(state, f )
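# Standalone sketch (not part of the original script) of the resume-epoch
# parsing used in `training_function` above; assumes checkpoint folders are
# always named "epoch_<n>".
def _parse_resume_epoch(checkpoint_path: str) -> int:
    digits = ''
    for char in checkpoint_path.split('epoch_')[1]:
        if char.isdigit():
            digits += char
        else:
            break
    # training resumes at the epoch after the checkpointed one
    return int(digits) + 1

assert _parse_resume_epoch('outputs/epoch_3') == 4
assert _parse_resume_epoch('outputs/epoch_12/') == 13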
def UpperCamelCase ( ) -> Union[str, Any]:
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--partial_train_epoch', type=int, default=None, help='If passed, the training will stop after this number of epochs.', )
    parser.add_argument(
        '--num_epochs', type=int, default=2, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args )
if __name__ == "__main__":
main()
| 40 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "" ) -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4() ) + suffix )
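# e.g. get_new_path('.wav') -> '/tmp/tmpab12cd34/6f1d...-....wav'; a fresh temp
# directory and random file name on every call (the literal path is illustrative).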
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
        tensor = torch.rand(12, dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor ), atol=1e-4 ) )
def snake_case_ ( self ) -> Any:
        tensor = torch.rand(12, dtype=torch.float64 ) - 0.5
        path = get_new_path(suffix='.wav' )
        sf.write(path, tensor, 1_6000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4 ) )
        self.assertEqual(agent_type.to_string(), path )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
        tensor = torch.randint(0, 256, (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw(), Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
def snake_case_ ( self ) -> Optional[int]:
        path = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
def snake_case_ ( self ) -> int:
        path = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
        string = 'Hey!'
        agent_type = AgentText(string )
        self.assertEqual(string, agent_type.to_string() )
        self.assertEqual(string, agent_type.to_raw() )
        self.assertEqual(string, agent_type )
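# Hedged round-trip sketch (runs only when soundfile is installed): a waveform
# written as a 16 kHz wav survives the round trip to within 1e-4, the tolerance
# the AgentAudio tests above rely on (16-bit PCM quantization error is ~3e-5).
if is_soundfile_availble():
    import numpy as np
    _wave = np.random.rand(12) - 0.5
    _path = get_new_path(suffix='.wav')
    sf.write(_path, _wave, 1_6000)
    _loaded, _rate = sf.read(_path)
    assert _rate == 1_6000
    assert np.allclose(_wave, _loaded, atol=1e-4)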
| 40 | 1 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def snake_case_ ( self ) -> int:
UpperCamelCase : Any = pipeline(task='text-generation', model='sshleifer/tiny-ctrl', framework='pt' )
# Using `do_sample=False` to force deterministic output
UpperCamelCase : Union[str, Any] = text_generator('This is a test', do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
], )
UpperCamelCase : Any = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
], )
UpperCamelCase : Optional[int] = text_generator('This is a test', do_sample=SCREAMING_SNAKE_CASE_, num_return_sequences=2, return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
], )
UpperCamelCase : Union[str, Any] = text_generator.model.config.eos_token_id
UpperCamelCase : Union[str, Any] = '<pad>'
UpperCamelCase : str = text_generator(
['This is a test', 'This is a second test'], do_sample=SCREAMING_SNAKE_CASE_, num_return_sequences=2, batch_size=2, return_tensors=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
[
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
],
[
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
],
], )
@require_tf
def snake_case_ ( self ) -> str:
UpperCamelCase : Tuple = pipeline(task='text-generation', model='sshleifer/tiny-ctrl', framework='tf' )
# Using `do_sample=False` to force deterministic output
UpperCamelCase : Optional[int] = text_generator('This is a test', do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
], )
UpperCamelCase : Dict = text_generator(['This is a test', 'This is a second test'], do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
], )
    def get_test_pipeline( self, model, tokenizer, processor ) -> int:
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Tuple = 'Hello I believe in'
UpperCamelCase : Optional[int] = pipeline('text-generation', model='hf-internal-testing/tiny-random-gpt2' )
UpperCamelCase : List[str] = text_generator(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}], )
UpperCamelCase : Optional[Any] = text_generator(SCREAMING_SNAKE_CASE_, stop_sequence=' fe' )
self.assertEqual(SCREAMING_SNAKE_CASE_, [{'generated_text': 'Hello I believe in fe'}] )
    def run_pipeline_test( self, text_generator, _ ) -> int:
        model = text_generator.model
        tokenizer = text_generator.tokenizer
UpperCamelCase : Union[str, Any] = text_generator('This is a test' )
self.assertEqual(SCREAMING_SNAKE_CASE_, [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
UpperCamelCase : Optional[int] = text_generator('This is a test', return_full_text=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertNotIn('This is a test', outputs[0]['generated_text'] )
UpperCamelCase : Tuple = pipeline(task='text-generation', model=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_, return_full_text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = text_generator('This is a test' )
self.assertEqual(SCREAMING_SNAKE_CASE_, [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertNotIn('This is a test', outputs[0]['generated_text'] )
UpperCamelCase : Dict = text_generator('This is a test', return_full_text=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
UpperCamelCase : List[Any] = text_generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
[{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}, {'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}],
[{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}, {'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}],
], )
if text_generator.tokenizer.pad_token is not None:
UpperCamelCase : Optional[Any] = text_generator(
['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
[{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}, {'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}],
[{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}, {'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}],
], )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = text_generator('test', return_full_text=SCREAMING_SNAKE_CASE_, return_text=SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = text_generator('test', return_full_text=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = text_generator('test', return_text=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_ )
        # Empty prompt is slightly special:
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCamelCase : Dict = text_generator('' )
self.assertEqual(SCREAMING_SNAKE_CASE_, [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCamelCase : Optional[Any] = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500, max_new_tokens=20 )
UpperCamelCase : Dict = text_generator('This is a test' * 500, handle_long_generation='hole', max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
text_generator(
'This is a test' * 500, handle_long_generation='hole', max_new_tokens=tokenizer.model_max_length + 10, )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case_ ( self ) -> Any:
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model='hf-internal-testing/tiny-random-bloom', model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16}, )
        self.assertEqual(pipe.model.device, torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16 )
UpperCamelCase : Any = pipe('This is a test' )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
], )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto', torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device, torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16 )
UpperCamelCase : Optional[Any] = pipe('This is a test' )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
], )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto' )
        self.assertEqual(pipe.model.device, torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32 )
UpperCamelCase : Optional[int] = pipe('This is a test' )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
], )
@require_torch
@require_torch_gpu
def snake_case_ ( self ) -> Union[str, Any]:
import torch
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device=0, torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case_ ( self ) -> int:
import torch
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto', torch_dtype=torch.float16 )
pipe('This is a test', do_sample=SCREAMING_SNAKE_CASE_, top_p=0.5 )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Union[str, Any] = 'Hello world'
UpperCamelCase : Optional[int] = pipeline('text-generation', model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
UpperCamelCase : Dict = logging.get_logger('transformers.generation.tf_utils' )
else:
UpperCamelCase : List[Any] = logging.get_logger('transformers.generation.utils' )
        UpperCamelCase : Dict = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
UpperCamelCase : Tuple = text_generator(SCREAMING_SNAKE_CASE_, max_length=10, max_new_tokens=1 )
self.assertIn(SCREAMING_SNAKE_CASE_, cl.out )
# The user only sets one -> no warning
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
UpperCamelCase : Tuple = text_generator(SCREAMING_SNAKE_CASE_, max_new_tokens=1 )
self.assertNotIn(SCREAMING_SNAKE_CASE_, cl.out )
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
UpperCamelCase : List[str] = text_generator(SCREAMING_SNAKE_CASE_, max_length=10 )
self.assertNotIn(SCREAMING_SNAKE_CASE_, cl.out )
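# Minimal usage sketch of the pipeline exercised above (the checkpoint is a
# tiny random-weight test model, so the generated text is gibberish; shown for
# the call shape only):
#
#   from transformers import pipeline
#   generator = pipeline('text-generation', model='hf-internal-testing/tiny-random-gpt2')
#   generator('Hello I believe in', max_new_tokens=5, do_sample=False)
#   # -> [{'generated_text': 'Hello I believe in ...'}]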
| 40 |
def kth_permutation(k, n ):
    """
    Returns the k-th (0-indexed) lexicographic permutation of range(n), decoded
    via the factorial number system.
    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    factorials = [1]
    for i in range(2, n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
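# Cross-check sketch: itertools.permutations yields the permutations of
# range(n) in lexicographic order, so it can validate kth_permutation for small n.
from itertools import permutations
assert tuple(kth_permutation(10, 4)) == list(permutations(range(4)))[10]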
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
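# Usage sketch (not part of the module): _LazyModule defers the heavy framework
# imports above until first attribute access, so e.g.
#
#   from transformers import AlbertConfig   # does not import torch/tf/flax yet
#   config = AlbertConfig(hidden_size=768)  # only the config submodule loads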
| 40 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ) -> Tuple:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, 'width_multiplier' ) )
class MobileViTVaModelTester:
    def __init__( self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0, ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8 )
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs( self ) -> int:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ) -> int:
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
    def create_and_check_model( self, config, pixel_values, labels, pixel_labels ) -> Optional[int]:
        model = MobileViTVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification( self, config, pixel_values, labels, pixel_labels ) -> Dict:
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels, pixel_labels ) -> Dict:
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> Optional[Any]:
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
def snake_case_ ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case_ ( self ) -> Tuple:
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ), expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
def snake_case_ ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def snake_case_ ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> Tuple:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
                [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
                [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape, expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape, expected_shape )
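# Hedged sketch of the `make_divisible` helper imported above (assumption: it
# rounds a channel count to the nearest multiple of `divisor` without dropping
# below 90% of the original value, as in the MobileNet family of models).
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

assert make_divisible_sketch(512 * 0.25, divisor=8) == 128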
| 40 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig ):
    model_type = "sew-d"
    def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ) -> Tuple:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) """
                F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio( self ) -> int:
        return functools.reduce(operator.mul, self.conv_stride, 1 )
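# Quick check of the property above: with the default conv_stride pattern the
# encoder downsamples raw audio by a factor of 5 * 2**6 = 320.
assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320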
| 40 |
def longest_distance(graph ):
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
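# For the adjacency list above the call prints 5: the longest chain in this DAG
# visits five vertices, e.g. 0 -> 2 -> 5 -> 6 -> 7.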
| 40 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2 ) -> Any:
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches, 1 )
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None ) -> List[str]:
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs, y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() ) # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module ):
    def __init__( self ) -> Dict:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward( self, x ) -> List[Any]:
        return x * self.a + self.b
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : List[Any] = DummyModel()
UpperCamelCase : Any = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : Optional[int] = dummy_dataloaders()
UpperCamelCase : str = ProjectConfiguration(total_limit=1, project_dir=SCREAMING_SNAKE_CASE_, automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCamelCase : List[Any] = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ), 1 )
def snake_case_ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : int = DummyModel()
UpperCamelCase : List[str] = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : Dict = dummy_dataloaders()
# Train baseline
UpperCamelCase : Optional[Any] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save initial
UpperCamelCase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE_, 'initial' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : List[Any] = model.a.item(), model.b.item()
UpperCamelCase : Dict = optimizer.state_dict()
UpperCamelCase : List[str] = train(3, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : List[Any] = model.a.item(), model.b.item()
UpperCamelCase : str = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCamelCase : Any = DummyModel()
UpperCamelCase : Any = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : Union[str, Any] = dummy_dataloaders()
UpperCamelCase : Union[str, Any] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : int = model.a.item(), model.b.item()
UpperCamelCase : Optional[int] = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = train(2, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save everything
UpperCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoint' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : List[str] = model.a.item(), model.b.item()
UpperCamelCase : Optional[int] = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : Optional[Any] = DummyModel()
UpperCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : Tuple = dummy_dataloaders()
UpperCamelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCamelCase : Tuple = Accelerator(project_dir=SCREAMING_SNAKE_CASE_, project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((UpperCamelCase) , (UpperCamelCase)) : Any = model.a.item(), model.b.item()
UpperCamelCase : Optional[int] = optimizer.state_dict()
UpperCamelCase : List[Any] = train(3, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCamelCase : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCamelCase : int = DummyModel()
UpperCamelCase : Dict = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : Optional[int] = dummy_dataloaders()
UpperCamelCase : Optional[int] = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = Accelerator(project_dir=SCREAMING_SNAKE_CASE_, project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_0' ) )
((UpperCamelCase) , (UpperCamelCase)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCamelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = train(2, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_1' ) )
test_rands += train(1, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : Dict = model.a.item(), model.b.item()
UpperCamelCase : str = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : Optional[int] = torch.tensor([1, 2, 3] )
UpperCamelCase : str = torch.tensor([2, 3, 4] )
UpperCamelCase : Dict = DummyModel()
UpperCamelCase : Optional[int] = torch.optim.Adam(net.parameters() )
UpperCamelCase : int = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def snake_case_ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : Tuple = DummyModel()
UpperCamelCase : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase : List[str] = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_, step_size=1, gamma=0.99 )
UpperCamelCase , UpperCamelCase : Union[str, Any] = dummy_dataloaders()
UpperCamelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCamelCase : str = Accelerator(project_dir=SCREAMING_SNAKE_CASE_, project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
UpperCamelCase : List[str] = scheduler.state_dict()
train(3, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_, scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_0' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_, scheduler.state_dict() )
def snake_case_ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : Optional[int] = DummyModel()
UpperCamelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_, total_limit=2 )
# Train baseline
UpperCamelCase : str = Accelerator(project_dir=SCREAMING_SNAKE_CASE_, project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
            # Save 11 states; with total_limit=2 only the two most recent survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_10' ) ) )
@require_cuda
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_, env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '''/tmp/accelerate/state_checkpointing'''
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__UpperCAmelCase = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__UpperCAmelCase = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
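# Checkpoint layout sketch (assumption, matching the assertions above): with
# ProjectConfiguration(automatic_checkpoint_naming=True), every save_state()
# call writes <project_dir>/checkpoints/checkpoint_<i> with an incrementing
# index, and total_limit=k prunes all but the k most recent checkpoint folders.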
| 40 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
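# Under TYPE_CHECKING the names are imported eagerly for static type checkers; at runtime the
# module is swapped for a _LazyModule that defers the heavy torch import until first attribute access.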
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase ( snake_case__ : Union[List, PIL.Image.Image, torch.Tensor] ) -> int:
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead' , FutureWarning , )
if isinstance(snake_case__ , torch.Tensor ):
return image
elif isinstance(snake_case__ , PIL.Image.Image ):
UpperCamelCase : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCamelCase , UpperCamelCase : Optional[int] = image[0].size
UpperCamelCase , UpperCamelCase : int = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
UpperCamelCase : Dict = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
UpperCamelCase : Tuple = np.concatenate(snake_case__ , axis=0 )
UpperCamelCase : int = np.array(snake_case__ ).astype(np.floataa ) / 255.0
UpperCamelCase : Optional[int] = image.transpose(0 , 3 , 1 , 2 )
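        # rescale pixel values from [0, 1] to the [-1, 1] range the diffusion model expects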
UpperCamelCase : Dict = 2.0 * image - 1.0
UpperCamelCase : Tuple = torch.from_numpy(snake_case__ )
elif isinstance(image[0] , torch.Tensor ):
UpperCamelCase : Any = torch.cat(snake_case__ , dim=0 )
return image
def UpperCamelCase ( snake_case__ : Union[List, PIL.Image.Image, torch.Tensor] ) -> Optional[int]:
if isinstance(snake_case__ , torch.Tensor ):
return mask
elif isinstance(snake_case__ , PIL.Image.Image ):
UpperCamelCase : List[str] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
UpperCamelCase , UpperCamelCase : Union[str, Any] = mask[0].size
UpperCamelCase , UpperCamelCase : int = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCamelCase : Union[str, Any] = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
UpperCamelCase : Optional[Any] = np.concatenate(snake_case__ , axis=0 )
UpperCamelCase : Union[str, Any] = mask.astype(np.floataa ) / 255.0
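        # binarize the mask: values below 0.5 become 0, everything else becomes 1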
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
UpperCamelCase : Tuple = torch.from_numpy(snake_case__ )
elif isinstance(mask[0] , torch.Tensor ):
UpperCamelCase : Union[str, Any] = torch.cat(snake_case__ , dim=0 )
return mask
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : UNetaDModel
UpperCAmelCase__ : RePaintScheduler
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 250, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = 10, SCREAMING_SNAKE_CASE_ = 10, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "pil", SCREAMING_SNAKE_CASE_ = True, ) -> Union[ImagePipelineOutput, Tuple]:
UpperCamelCase : List[Any] = image
UpperCamelCase : Optional[Any] = _preprocess_image(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = original_image.to(device=self.device, dtype=self.unet.dtype )
UpperCamelCase : Tuple = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = mask_image.to(device=self.device, dtype=self.unet.dtype )
UpperCamelCase : Optional[Any] = original_image.shape[0]
        # sample Gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCamelCase : str = original_image.shape
UpperCamelCase : Dict = randn_tensor(SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, device=self.device, dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.device )
UpperCamelCase : List[str] = eta
UpperCamelCase : int = self.scheduler.timesteps[0] + 1
UpperCamelCase : Optional[Any] = generator[0] if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else generator
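        # RePaint alternates denoising steps (t decreasing) with "jump back" steps that re-noise the sample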
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
UpperCamelCase : List[Any] = self.unet(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
UpperCamelCase : Union[str, Any] = self.scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
UpperCamelCase : Union[str, Any] = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = t
UpperCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase : Union[str, Any] = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : str = ShapEPipeline
UpperCAmelCase__ : Union[str, Any] = ["prompt"]
UpperCAmelCase__ : List[str] = ["prompt"]
UpperCAmelCase__ : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Optional[Any] = False
@property
def snake_case_ ( self ) -> List[Any]:
return 32
@property
def snake_case_ ( self ) -> List[Any]:
return 32
@property
def snake_case_ ( self ) -> Dict:
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> Optional[int]:
return 8
@property
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def snake_case_ ( self ) -> Tuple:
torch.manual_seed(0 )
UpperCamelCase : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> Tuple:
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
UpperCamelCase : str = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def snake_case_ ( self ) -> Tuple:
torch.manual_seed(0 )
UpperCamelCase : List[Any] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
UpperCamelCase : Tuple = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def snake_case_ ( self ) -> str:
UpperCamelCase : List[Any] = self.dummy_prior
UpperCamelCase : Union[str, Any] = self.dummy_text_encoder
UpperCamelCase : List[str] = self.dummy_tokenizer
UpperCamelCase : Dict = self.dummy_renderer
UpperCamelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=SCREAMING_SNAKE_CASE_, clip_sample=SCREAMING_SNAKE_CASE_, clip_sample_range=1.0, )
UpperCamelCase : Tuple = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Dict:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def snake_case_ ( self ) -> Any:
UpperCamelCase : int = 'cpu'
UpperCamelCase : str = self.get_dummy_components()
UpperCamelCase : Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : int = output.images[0]
UpperCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase : Optional[Any] = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[Any]:
        # NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Optional[Any] = torch_device == 'cpu'
UpperCamelCase : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=SCREAMING_SNAKE_CASE_, relax_max_difference=SCREAMING_SNAKE_CASE_, )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Dict = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase : Dict = batch_size * [inputs[key]]
UpperCamelCase : List[str] = pipe(**SCREAMING_SNAKE_CASE_, num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> int:
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
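        # golden output rendered ahead of time; compared against the pipeline output below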
UpperCamelCase : Optional[Any] = ShapEPipeline.from_pretrained('openai/shap-e' )
UpperCamelCase : Tuple = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCamelCase : List[Any] = pipe(
'a shark', generator=SCREAMING_SNAKE_CASE_, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
def UpperCamelCase ( ) -> List[str]:
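    # Project Euler problem 48: last ten digits of 1^1 + 2^2 + ... + 1000^1000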
UpperCamelCase : Optional[Any] = 0
for i in range(1 , 1001 ):
total += i**i
return str(snake_case__ )[-10:]
if __name__ == "__main__":
print(solution())
| 40 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
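        # a correctly normalized utterance has near-zero mean and unit variance per feature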
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[1][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=64, ) -> Optional[int]:
UpperCamelCase : Dict = parent
UpperCamelCase : Any = batch_size
UpperCamelCase : Tuple = is_training
UpperCamelCase : List[str] = use_auxiliary_loss
UpperCamelCase : Tuple = num_queries
UpperCamelCase : Optional[Any] = num_channels
UpperCamelCase : Union[str, Any] = min_size
UpperCamelCase : Any = max_size
UpperCamelCase : Optional[int] = num_labels
UpperCamelCase : Dict = hidden_dim
UpperCamelCase : Any = hidden_dim
def snake_case_ ( self ) -> str:
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = torch.ones([self.batch_size, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
UpperCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
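        # random binary masks and integer labels serve as dummy segmentation targets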
UpperCamelCase : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Any = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
UpperCamelCase : Union[str, Any] = self.num_queries
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Union[str, Any] = [1, 1, 1, 1]
UpperCamelCase : Optional[int] = self.num_channels
UpperCamelCase : Union[str, Any] = 64
UpperCamelCase : Optional[int] = 128
UpperCamelCase : Optional[Any] = self.hidden_dim
UpperCamelCase : Union[str, Any] = self.hidden_dim
UpperCamelCase : str = self.hidden_dim
return config
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = self.prepare_config_and_inputs()
UpperCamelCase : Optional[int] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase : List[str] = output.encoder_hidden_states
UpperCamelCase : int = output.pixel_decoder_hidden_states
UpperCamelCase : Tuple = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), config.decoder_layers )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> List[Any]:
with torch.no_grad():
UpperCamelCase : Union[str, Any] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase : Any = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = model(
pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = MaskaFormerModelTester(self )
UpperCamelCase : Tuple = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Any:
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Dict:
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def snake_case_ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def snake_case_ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Tuple:
pass
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[str] = [*signature.parameters.keys()]
UpperCamelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> List[str]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase : List[str] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : List[str] = (self.model_tester.min_size,) * 2
UpperCamelCase : Any = {
'pixel_values': torch.randn((2, 3, *size), device=SCREAMING_SNAKE_CASE_ ),
'mask_labels': torch.randn((2, 10, *size), device=SCREAMING_SNAKE_CASE_ ),
'class_labels': torch.zeros(2, 10, device=SCREAMING_SNAKE_CASE_ ).long(),
}
UpperCamelCase : List[str] = self.model_tester.get_config()
UpperCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def snake_case_ ( self ) -> int:
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_, output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def snake_case_ ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase : Optional[Any] = self.all_model_classes[1]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
UpperCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Union[str, Any] = self.all_model_classes[1]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCamelCase : Union[str, Any] = True
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase : Tuple = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCAmelCase = 1e-4
def UpperCamelCase ( ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def snake_case_ ( self ) -> Any:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.default_image_processor
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : Optional[int] = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[int] = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase : str = self.default_image_processor
UpperCamelCase : Optional[Any] = prepare_img()
UpperCamelCase : str = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
UpperCamelCase : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase : Optional[int] = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
UpperCamelCase : List[str] = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
UpperCamelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase : Tuple = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> str:
UpperCamelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase : int = self.default_image_processor
UpperCamelCase : List[str] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors='pt', )
UpperCamelCase : Tuple = inputs['pixel_values'].to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['mask_labels']]
UpperCamelCase : str = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase : Any = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
| 40 |
def UpperCamelCase ( snake_case__ : int ) -> str:
    if isinstance(snake_case__ , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(snake_case__ , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
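    # peel off the lowest bit each iteration and prepend it, so bits come out most-significant first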
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : Optional[int] ) -> List[List[ImageInput]]:
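    # coerce a single image, one video, or a batch of videos into a list of videos (lists of frames)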
if isinstance(snake_case__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(snake_case__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(snake_case__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Tuple = ["pixel_values"]
def __init__( self, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = 1 / 255, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = size if size is not None else {'shortest_edge': 256}
UpperCamelCase : Tuple = get_size_dict(SCREAMING_SNAKE_CASE_, default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_, param_name='crop_size' )
UpperCamelCase : Optional[int] = do_resize
UpperCamelCase : Optional[Any] = size
UpperCamelCase : Any = do_center_crop
UpperCamelCase : Tuple = crop_size
UpperCamelCase : Dict = resample
UpperCamelCase : int = do_rescale
UpperCamelCase : Optional[Any] = rescale_factor
UpperCamelCase : List[str] = offset
UpperCamelCase : int = do_normalize
UpperCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
UpperCamelCase : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_, default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" in size:
UpperCamelCase : List[str] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_, size['shortest_edge'], default_to_square=SCREAMING_SNAKE_CASE_ )
elif "height" in size and "width" in size:
UpperCamelCase : Dict = (size['height'], size['width'])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_, resample=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
UpperCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_, size=(size['height'], size['width']), data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
UpperCamelCase : Tuple = image.astype(np.floataa )
if offset:
UpperCamelCase : Tuple = image - (scale / 2)
return rescale(SCREAMING_SNAKE_CASE_, scale=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE_, mean=SCREAMING_SNAKE_CASE_, std=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
UpperCamelCase : Optional[int] = to_numpy_array(SCREAMING_SNAKE_CASE_ )
if do_resize:
UpperCamelCase : Dict = self.resize(image=SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_, resample=SCREAMING_SNAKE_CASE_ )
if do_center_crop:
UpperCamelCase : Union[str, Any] = self.center_crop(SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_ )
if do_rescale:
UpperCamelCase : Optional[Any] = self.rescale(image=SCREAMING_SNAKE_CASE_, scale=SCREAMING_SNAKE_CASE_, offset=SCREAMING_SNAKE_CASE_ )
if do_normalize:
UpperCamelCase : int = self.normalize(image=SCREAMING_SNAKE_CASE_, mean=SCREAMING_SNAKE_CASE_, std=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = to_channel_dimension_format(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return image
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST, **SCREAMING_SNAKE_CASE_, ) -> PIL.Image.Image:
UpperCamelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase : List[str] = resample if resample is not None else self.resample
UpperCamelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase : int = offset if offset is not None else self.offset
UpperCamelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase : Dict = image_std if image_std is not None else self.image_std
UpperCamelCase : Optional[Any] = size if size is not None else self.size
UpperCamelCase : Tuple = get_size_dict(SCREAMING_SNAKE_CASE_, default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = crop_size if crop_size is not None else self.crop_size
UpperCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE_, param_name='crop_size' )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCamelCase : Tuple = make_batched(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = [
[
self._preprocess_image(
image=SCREAMING_SNAKE_CASE_, do_resize=SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_, resample=SCREAMING_SNAKE_CASE_, do_center_crop=SCREAMING_SNAKE_CASE_, crop_size=SCREAMING_SNAKE_CASE_, do_rescale=SCREAMING_SNAKE_CASE_, rescale_factor=SCREAMING_SNAKE_CASE_, offset=SCREAMING_SNAKE_CASE_, do_normalize=SCREAMING_SNAKE_CASE_, image_mean=SCREAMING_SNAKE_CASE_, image_std=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, )
for img in video
]
for video in videos
]
UpperCamelCase : str = {'pixel_values': videos}
return BatchFeature(data=SCREAMING_SNAKE_CASE_, tensor_type=SCREAMING_SNAKE_CASE_ )
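# Illustrative usage sketch (not part of the original module): `preprocess`
# above follows the VideoMAE-style video-processor API, so the expected entry
# point looks like the call below; the concrete processor class name is an
# assumption.
#
#   import numpy as np
#   from transformers import VideoMAEImageProcessor
#
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
#   batch = processor(video, return_tensors="pt")
#   print(batch["pixel_values"].shape)  # e.g. torch.Size([1, 16, 3, 224, 224])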
| 40 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
        UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
                if value.dtype == torch.int8:
                    UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
                        '0.37.2' )
                    if not is_8bit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , snake_case__ ) and fp16_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
            if is_8bit:
                UpperCamelCase : Optional[Any] = bnb.nn.Int8Params(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
            elif is_4bit:
                UpperCamelCase : Optional[Any] = bnb.nn.Params4bit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
            UpperCamelCase : Dict = new_value
            if fp16_statistics is not None:
                setattr(module.weight , 'SCB' , fp16_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        UpperCamelCase : Any = bnb.nn.Linear8bitLt(
                            snake_case__ , snake_case__ , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            UpperCamelCase : str = bnb.nn.Linear4bit(
                                snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
    UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
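# Illustrative sketch (not from the original file): how the helpers above are
# typically driven. The top-level defs are name-obfuscated here; the call below
# assumes the public name `replace_with_bnb_linear` referenced by the
# deprecation shim, and that `BitsAndBytesConfig` exposes the fields used
# above (requires bitsandbytes and accelerate).
#
#   import torch.nn as nn
#   from transformers import BitsAndBytesConfig
#
#   toy = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))
#   cfg = BitsAndBytesConfig(load_in_8bit=True)
#   # every nn.Linear outside `modules_to_not_convert` becomes bnb.nn.Linear8bitLt
#   toy = replace_with_bnb_linear(toy, quantization_config=cfg)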
| 40 | 1 |
from random import randint, random
def UpperCamelCase ( snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : int = 5 , ) -> list:
UpperCamelCase : str = [[-1] * number_of_cells] # Create a highway without any car
UpperCamelCase : List[str] = 0
UpperCamelCase : List[str] = max(snake_case__ , 0 )
while i < number_of_cells:
UpperCamelCase : int = (
randint(0 , snake_case__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def UpperCamelCase ( snake_case__ : list , snake_case__ : int ) -> int:
UpperCamelCase : Dict = 0
UpperCamelCase : List[str] = highway_now[car_index + 1 :]
for cell in range(len(snake_case__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(snake_case__ , -1 )
def UpperCamelCase ( snake_case__ : list , snake_case__ : float , snake_case__ : int ) -> list:
UpperCamelCase : Any = len(snake_case__ )
    # Before calculations, the highway is empty
UpperCamelCase : Optional[int] = [-1] * number_of_cells
for car_index in range(snake_case__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
UpperCamelCase : int = min(highway_now[car_index] + 1 , snake_case__ )
# Number of empty cell before the next car
UpperCamelCase : str = get_distance(snake_case__ , snake_case__ ) - 1
# We can't have the car causing an accident
UpperCamelCase : Union[str, Any] = min(next_highway[car_index] , snake_case__ )
if random() < probability:
# Randomly, a driver will slow down
UpperCamelCase : Any = max(next_highway[car_index] - 1 , 0 )
return next_highway
def UpperCamelCase ( snake_case__ : list , snake_case__ : int , snake_case__ : float , snake_case__ : int ) -> list:
UpperCamelCase : Dict = len(highway[0] )
for i in range(snake_case__ ):
UpperCamelCase : List[str] = update(highway[i] , snake_case__ , snake_case__ )
UpperCamelCase : Tuple = [-1] * number_of_cells
for car_index in range(snake_case__ ):
UpperCamelCase : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
UpperCamelCase : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
UpperCamelCase : Optional[int] = speed
highway.append(snake_case__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
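# Illustrative demo: the internal call sites (`get_distance`, `update`) identify
# the obfuscated top-level defs as construct_highway / get_distance / update /
# simulate from the Nagel-Schreckenberg model; with those names restored, a run
# looks like:
#
#   highway = construct_highway(number_of_cells=30, frequency=4, initial_speed=2)
#   history = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)
#   for state in history:
#       print("".join("." if cell == -1 else str(cell) for cell in state))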
| 40 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
    UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i) for i in x.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
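# The `converters` kwarg above is forwarded to pandas.read_csv, so the parsing
# behaviour can be reproduced without `datasets` at all (self-contained check):
import io
import pandas as pd

_df = pd.read_csv(
    io.StringIO('int_list\n1 2 3\n4 5 6\n7 8 9\n'),
    converters={'int_list': lambda x: [int(i) for i in x.split()]},
)
assert _df['int_list'].tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]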
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
import math
import random
def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
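# Note: with deriv=True the function expects a value that is *already*
# sigmoid(x), relying on the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).
# Quick numeric check of that identity at x = 0.5:
_s = 1 / (1 + math.exp(-0.5 ))
assert abs(_s * (1 - _s) - math.exp(-0.5 ) / (1 + math.exp(-0.5 )) ** 2) < 1e-12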
# Initial Value
__UpperCAmelCase = 0.02
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
        UpperCamelCase : int = (expected / 100) - layer_1
# Error delta
        UpperCamelCase : List[str] = layer_1_error * sigmoid_function(layer_1 , True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input('''Expected value: '''))
__UpperCAmelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "The column name of the images in the files."} )
UpperCAmelCase__ : Optional[str] = field(default=a__ , metadata={"help": "A folder containing the training data."} )
UpperCAmelCase__ : Optional[str] = field(default=a__ , metadata={"help": "A folder containing the validation data."} )
UpperCAmelCase__ : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
UpperCAmelCase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def snake_case_ ( self ) -> int:
UpperCamelCase : str = {}
if self.train_dir is not None:
UpperCamelCase : Optional[Any] = self.train_dir
if self.validation_dir is not None:
UpperCamelCase : List[str] = self.validation_dir
UpperCamelCase : List[str] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str = field(
default=a__ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
UpperCAmelCase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
UpperCAmelCase__ : str = field(default=a__ , metadata={"help": "Name or path of preprocessor config."} )
UpperCAmelCase__ : bool = field(
default=a__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
UpperCAmelCase__ : float = field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
UpperCAmelCase__ : bool = field(
default=a__ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : float = field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def UpperCamelCase ( snake_case__ : int ) -> Any:
UpperCamelCase : List[Any] = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def UpperCamelCase ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase : int = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
UpperCamelCase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase : Dict = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
UpperCamelCase : List[Any] = ds['train'].train_test_split(data_args.train_val_split )
UpperCamelCase : Optional[int] = split['train']
UpperCamelCase : List[str] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : Tuple = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase : int = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case__ )
elif model_args.model_name_or_path:
UpperCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
UpperCamelCase : str = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase : Tuple = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case__ )
elif model_args.model_name_or_path:
UpperCamelCase : int = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
UpperCamelCase : List[Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase : Tuple = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
UpperCamelCase : List[str] = ViTMAEForPreTraining(snake_case__ )
if training_args.do_train:
UpperCamelCase : Tuple = ds['train'].column_names
else:
UpperCamelCase : Dict = ds['validation'].column_names
if data_args.image_column_name is not None:
UpperCamelCase : Any = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase : Dict = 'image'
elif "img" in column_names:
UpperCamelCase : Optional[int] = 'img'
else:
UpperCamelCase : str = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase : Optional[int] = image_processor.size['shortest_edge']
else:
UpperCamelCase : Optional[Any] = (image_processor.size['height'], image_processor.size['width'])
UpperCamelCase : List[Any] = Compose(
[
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(snake_case__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(snake_case__ : List[Any] ):
UpperCamelCase : Dict = [transforms(snake_case__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
UpperCamelCase : List[Any] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
UpperCamelCase : str = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case__ )
# Compute absolute learning rate
UpperCamelCase : Optional[Any] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase : Optional[int] = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase : List[Any] = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
UpperCamelCase : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase : Optional[int] = last_checkpoint
UpperCamelCase : Tuple = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase : Dict = trainer.evaluate()
trainer.log_metrics('eval' , snake_case__ )
trainer.save_metrics('eval' , snake_case__ )
# Write model card and (optionally) push to hub
UpperCamelCase : Optional[Any] = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def UpperCamelCase ( snake_case__ : int ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
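# A representative launch (illustrative; the flags mirror the dataclasses above
# plus standard TrainingArguments options):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss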
| 40 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]:
return EnvironmentCommand()
class lowerCAmelCase_ ( a__ ):
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = huggingface_hub.__version__
UpperCamelCase : int = 'not installed'
UpperCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
UpperCamelCase : Any = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase : str = transformers.__version__
UpperCamelCase : Optional[Any] = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase : Dict = accelerate.__version__
UpperCamelCase : List[str] = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase : List[str] = xformers.__version__
UpperCamelCase : Dict = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE_ ) )
return info
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
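# Worked example: `format_dict` renders one "- key: value" line per entry, so
# {'Platform': 'Linux', 'Python version': '3.10.12'} becomes
# "- Platform: Linux\n- Python version: 3.10.12\n"; this is the body of the
# environment report printed by the command above.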
| 40 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=[10, 20, 30, 40], SCREAMING_SNAKE_CASE_=[2, 2, 3, 2], SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=["stage2", "stage3", "stage4"], SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=None, ) -> Optional[int]:
UpperCamelCase : Optional[int] = parent
UpperCamelCase : Optional[Any] = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : int = num_stages
UpperCamelCase : Any = hidden_sizes
UpperCamelCase : Tuple = depths
UpperCamelCase : Tuple = is_training
UpperCamelCase : Optional[Any] = use_labels
UpperCamelCase : Optional[int] = intermediate_size
UpperCamelCase : Any = hidden_act
UpperCamelCase : Dict = type_sequence_label_size
UpperCamelCase : int = initializer_range
UpperCamelCase : List[str] = out_features
UpperCamelCase : str = num_labels
UpperCamelCase : Optional[int] = scope
UpperCamelCase : Tuple = num_stages
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : str = None
if self.use_labels:
UpperCamelCase : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> Optional[int]:
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def snake_case_ ( self ) -> Optional[int]:
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=SCREAMING_SNAKE_CASE_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=SCREAMING_SNAKE_CASE_, loss_ignore_index=255, num_labels=self.num_labels, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : int = UperNetForSemanticSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = UperNetModelTester(self )
UpperCamelCase : int = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def snake_case_ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self ) -> Dict:
return
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : int = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[Any] = [*signature.parameters.keys()]
UpperCamelCase : int = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def snake_case_ ( self ) -> Tuple:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def snake_case_ ( self ) -> str:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def snake_case_ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def snake_case_ ( self ) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> List[str]:
pass
def snake_case_ ( self ) -> Optional[Any]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : str = _config_zero_init(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
UpperCamelCase : Dict = model_class(config=SCREAMING_SNAKE_CASE_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
@unittest.skip(reason='UperNet does not have tied weights' )
def snake_case_ ( self ) -> List[Any]:
pass
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = UperNetForSemanticSegmentation.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Optional[Any]:
UpperCamelCase : Dict = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
UpperCamelCase : List[Any] = Image.open(snake_case__ ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
UpperCamelCase : int = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : List[Any] = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
UpperCamelCase : Dict = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = prepare_img()
UpperCamelCase : List[Any] = processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
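# The slow tests above reduce to this standalone recipe (illustrative; needs
# torch, PIL, transformers and Hub access; checkpoint name taken from the tests):
#
#   import torch
#   from PIL import Image
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=Image.new("RGB", (512, 512)), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits   # shape (1, num_labels, 512, 512)
#   segmentation = logits.argmax(dim=1)   # per-pixel class ids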
| 40 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCAmelCase = {
'''facebook/xglm-564M''': 2_048,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase : Any = 7
UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : Optional[int] = len(self.sp_model )
UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
UpperCamelCase : int = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Any = {}
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        UpperCamelCase : Optional[int] = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=SCREAMING_SNAKE_CASE_, token_ids_1=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
        if token_ids_1 is None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
@property
def snake_case_ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip()
return out_string
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
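# Typical round trip with the published checkpoint (illustrative; needs Hub
# access). Ids 0-3 follow the fairseq alignment documented above, and raw
# SentencePiece ids are shifted by `fairseq_offset`:
#
#   from transformers import XGLMTokenizer
#
#   tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tok("Hello world")["input_ids"]  # begins with </s> (id 2), as built above
#   assert tok.decode(ids, skip_special_tokens=True).strip() == "Hello world"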
| 40 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__UpperCAmelCase = ['''bert-base-uncased''', '''bert-base-cased''']
__UpperCAmelCase = '''hf-internal-testing/tiny-bert-tf-only'''
if is_tf_available():
class lowerCAmelCase_ ( tf.keras.Model ):
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
super().__init__()
UpperCamelCase : Optional[int] = tokenizer
UpperCamelCase : List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = TFAutoModel.from_config(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = self.bert(**SCREAMING_SNAKE_CASE_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Dict:
super().setUp()
UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_, use_fast_bert_tokenizer=SCREAMING_SNAKE_CASE_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase : str = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase : Optional[int] = list(zip(self.test_sentences, self.test_sentences[::-1] ) )
def snake_case_ ( self ) -> Optional[Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE_, return_tensors='tf', padding='longest' )
UpperCamelCase : int = tf_tokenizer(SCREAMING_SNAKE_CASE_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64 ) == tf_outputs[key] ) )
@slow
def snake_case_ ( self ) -> int:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences )
UpperCamelCase : Union[str, Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64 ) == separated_outputs[key] ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase : Optional[Any] = tf.function(SCREAMING_SNAKE_CASE_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase : Dict = tf.constant(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = compiled_tokenizer(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = tf_tokenizer(SCREAMING_SNAKE_CASE_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def snake_case_ ( self ) -> Tuple:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase : Any = ModelToSave(tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase : Dict = Path(SCREAMING_SNAKE_CASE_ ) / 'saved.model'
model.save(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = tf.keras.models.load_model(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = loaded_model(SCREAMING_SNAKE_CASE_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ), 1e-5 )
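# A minimal, self-contained sketch (assumes only TensorFlow) of the
# eager-vs-compiled equivalence check used in the tests above: wrap a callable
# in tf.function and verify the traced graph returns the same values as eager
# execution.
import tensorflow as tf

def double(x):
    return x * 2

compiled_double = tf.function(double)
inputs = tf.constant([1, 2, 3])
assert bool(tf.reduce_all(double(inputs) == compiled_double(inputs)))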
| 40 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Dict = RobertaTokenizer
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) )
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = add_prefix_space
UpperCamelCase : Optional[Any] = 'post_processor'
UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : Optional[Any] = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase : Optional[int] = tuple(state['cls'] )
UpperCamelCase : Any = False
if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Optional[int] = add_prefix_space
UpperCamelCase : List[Any] = True
if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCamelCase : Dict = trim_offsets
UpperCamelCase : Union[str, Any] = True
if changes_to_apply:
UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) )
UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
UpperCamelCase : List[Any] = value
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple:
UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
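# Minimal self-contained sketch of the special-token layout produced by the
# two methods above: RoBERTa uses <s> A </s> for one sequence and
# <s> A </s></s> B </s> for a pair (0 and 2 are RoBERTa's <s> and </s> ids;
# the other token ids below are illustrative).
def with_special_tokens(ids_a, ids_b=None, bos=0, eos=2):
    out = [bos] + ids_a + [eos]
    if ids_b is None:
        return out
    return out + [eos] + ids_b + [eos]

assert with_special_tokens([10, 11]) == [0, 10, 11, 2]
assert with_special_tokens([10], [20]) == [0, 10, 2, 2, 20, 2]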
| 40 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
    if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
        raise ValueError(F"""{tensor_name} is on the meta device, so we need a `value` to put it on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
        UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
                if value.dtype == torch.int8:
UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
                    if not is_8bit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , snake_case__ ) and fp16_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
            if is_8bit:
                UpperCamelCase : Optional[Any] = bnb.nn.Int8Params(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
            elif is_4bit:
                UpperCamelCase : Optional[Any] = bnb.nn.Params4bit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
UpperCamelCase : Dict = new_value
            if fp16_statistics is not None:
                setattr(module.weight , 'SCB' , fp16_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
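# Self-contained sketch of the dotted-name traversal at the top of the
# function above: split "a.b.c" and walk getattr down to the submodule that
# owns the final attribute.
def resolve_parent(root, dotted_name):
    *parents, leaf = dotted_name.split('.')
    for part in parents:
        root = getattr(root, part)
    return root, leaf

class _Box:
    pass

_root = _Box()
_root.child = _Box()
_root.child.weight = 3
_parent, _leaf = resolve_parent(_root, 'child.weight')
assert getattr(_parent, _leaf) == 3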
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        UpperCamelCase : Any = bnb.nn.Linear8bitLt(
                            snake_case__ , snake_case__ , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            UpperCamelCase : str = bnb.nn.Linear4bit(
                                snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
    UpperCamelCase : int = deepcopy(snake_case__ )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
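# Self-contained sketch of the Accelerate-version compatibility branch above:
# older find_tied_parameters returned a {name: [tied_names]} dict, newer
# versions a list of name groups; both flatten to a single list of names.
def flatten_tied(tied):
    if isinstance(tied, dict):
        return sum(list(tied.values()), []) + list(tied.keys())
    return sum(tied, [])

assert flatten_tied({'a': ['b', 'c']}) == ['b', 'c', 'a']
assert flatten_tied([['a', 'b']]) == ['a', 'b']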
| 40 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
        if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(None )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
        if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
            UpperCamelCase : List[str] = {'dtype': torch.int64}
        elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
            UpperCamelCase : int = {'dtype': torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
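# Sketch (assumes numpy and torch) of the dtype defaulting above: integer
# inputs become torch.int64 tensors and floating ones torch.float32, matching
# the defaults PyTorch expects for indices and weights.
import numpy as np
import torch

def default_torch_dtype(value):
    arr = np.asarray(value)
    if np.issubdtype(arr.dtype, np.integer):
        return torch.int64
    if np.issubdtype(arr.dtype, np.floating):
        return torch.float32
    return None

assert default_torch_dtype(np.arange(3)) is torch.int64
assert default_torch_dtype(np.zeros(2, dtype=np.float64)) is torch.float32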
| 40 | 1 |
import itertools
import math
def UpperCamelCase ( snake_case__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
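# Quick self-contained check of the 6k +/- 1 fact used above: every prime
# greater than 3 is congruent to 1 or 5 modulo 6, so trial division only needs
# divisors of that shape.
def naive_is_prime(n):
    return n > 1 and all(n % d for d in range(2, n))

assert all(n % 6 in (1, 5) for n in range(5, 500) if naive_is_prime(n))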
def UpperCamelCase ( ) -> List[Any]:
UpperCamelCase : List[str] = 2
while True:
if is_prime(snake_case__ ):
yield num
num += 1
def UpperCamelCase ( snake_case__ : int = 10001 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , snake_case__ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 40 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
UpperCamelCase : int = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCamelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCamelCase : Dict = (
            'Input data have different datatypes... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(snake_case__ )
UpperCamelCase : List[Any] = []
for value in value_array:
UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] )
UpperCamelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ )
if dist > temp_dist:
UpperCamelCase : str = temp_dist
UpperCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
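# Self-contained numpy sketch of the nearest-neighbour loop above: for each
# query vector, keep the dataset row with the smallest Euclidean distance.
def nearest(dataset, query):
    dists = np.linalg.norm(dataset - query, axis=1)
    i = int(np.argmin(dists))
    return dataset[i].tolist(), float(dists[i])

_data = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
_vec, _dist = nearest(_data, np.array([0.9, 1.1]))
assert _vec == [1.0, 1.0]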
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
import argparse
import os
import re
import packaging.version
__UpperCAmelCase = '''examples/'''
__UpperCAmelCase = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__UpperCAmelCase = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__UpperCAmelCase = '''README.md'''
def UpperCamelCase ( snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Tuple ) -> Dict:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : List[str] = f.read()
UpperCamelCase , UpperCamelCase : List[Any] = REPLACE_PATTERNS[pattern]
UpperCamelCase : Any = replace.replace('VERSION' , snake_case__ )
UpperCamelCase : str = re_pattern.sub(snake_case__ , snake_case__ )
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(snake_case__ )
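# Minimal sketch of the pattern/replacement mechanics above: each entry pairs
# a compiled regex with a template whose literal "VERSION" placeholder is
# filled in before re.sub rewrites the file contents.
_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
_template = '__version__ = "VERSION"\n'
_code = '__version__ = "4.30.0.dev0"\n'
print(_pattern.sub(_template.replace('VERSION', '4.30.0'), _code))
# -> __version__ = "4.30.0"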
def UpperCamelCase ( snake_case__ : Any ) -> int:
for folder, directories, fnames in os.walk(snake_case__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='examples' )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int=False ) -> Dict:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case__ , snake_case__ , snake_case__ )
if not patch:
update_version_in_examples(snake_case__ )
def UpperCamelCase ( ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = '🤗 Transformers currently provides the following architectures'
UpperCamelCase : Dict = '1. Want to contribute a new model?'
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Tuple = f.readlines()
# Find the start of the list.
UpperCamelCase : Any = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCamelCase : Dict = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
UpperCamelCase : Union[str, Any] = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(snake_case__ )
def UpperCamelCase ( ) -> Dict:
with open(REPLACE_FILES['init'] , 'r' ) as f:
UpperCamelCase : Any = f.read()
UpperCamelCase : Optional[int] = REPLACE_PATTERNS['init'][0].search(snake_case__ ).groups()[0]
return packaging.version.parse(snake_case__ )
def UpperCamelCase ( snake_case__ : Any=False ) -> Tuple:
UpperCamelCase : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
UpperCamelCase : List[str] = default_version.base_version
elif patch:
UpperCamelCase : Union[str, Any] = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCamelCase : str = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCamelCase : Optional[int] = input(F"""Which version are you releasing? [{default_version}]""" )
if len(snake_case__ ) == 0:
UpperCamelCase : Optional[int] = default_version
print(F"""Updating version to {version}.""" )
global_version_update(snake_case__ , patch=snake_case__ )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
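# Sketch of the bump rules above using packaging.version (the version string
# is illustrative): a patch release bumps micro, a normal release bumps minor
# and resets micro, and the follow-up dev version adds .dev0 on the next minor.
_v = packaging.version.parse('4.30.0')
print(f"{_v.major}.{_v.minor}.{_v.micro + 1}")      # patch -> 4.30.1
print(f"{_v.major}.{_v.minor + 1}.0")               # minor -> 4.31.0
print(f"{_v.major}.{_v.minor + 1}.0.dev0")          # dev   -> 4.31.0.dev0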
def UpperCamelCase ( ) -> Optional[Any]:
UpperCamelCase : Optional[Any] = get_version()
UpperCamelCase : Optional[Any] = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCamelCase : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
UpperCamelCase : Dict = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(snake_case__ ) == 0:
UpperCamelCase : Any = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(snake_case__ )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__UpperCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 40 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCAmelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCAmelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
    classifier.fit(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCAmelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCAmelCase = np.expand_dims(test_image, axis=0)
__UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
    if result[0][0] < 0.5:
        __UpperCAmelCase = '''Normal'''
    else:
        __UpperCAmelCase = '''Abnormality detected'''
| 40 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Optional[Any] = "gpt_bigcode"
UpperCAmelCase__ : int = ["past_key_values"]
UpperCAmelCase__ : Optional[int] = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, SCREAMING_SNAKE_CASE_=5_0257, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="gelu_pytorch_tanh", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=1e-5, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=5_0256, SCREAMING_SNAKE_CASE_=5_0256, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Any:
UpperCamelCase : int = vocab_size
UpperCamelCase : Any = n_positions
UpperCamelCase : Optional[int] = n_embd
UpperCamelCase : int = n_layer
UpperCamelCase : int = n_head
UpperCamelCase : Any = n_inner
UpperCamelCase : int = activation_function
UpperCamelCase : Tuple = resid_pdrop
UpperCamelCase : int = embd_pdrop
UpperCamelCase : Any = attn_pdrop
UpperCamelCase : int = layer_norm_epsilon
UpperCamelCase : Tuple = initializer_range
UpperCamelCase : Dict = scale_attn_weights
UpperCamelCase : Union[str, Any] = use_cache
UpperCamelCase : Dict = attention_softmax_in_fpaa
UpperCamelCase : Any = scale_attention_softmax_in_fpaa
UpperCamelCase : Optional[Any] = multi_query
UpperCamelCase : Any = bos_token_id
UpperCamelCase : Union[str, Any] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
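# Self-contained sketch of the attribute_map mechanism the config above relies
# on: reads of a canonical name (hidden_size) are redirected to the stored
# GPT-2-style name (n_embd).
class _ConfigSketch:
    attribute_map = {'hidden_size': 'n_embd'}

    def __init__(self, n_embd):
        self.n_embd = n_embd

    def __getattr__(self, name):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert _ConfigSketch(n_embd=256).hidden_size == 256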
| 40 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = '''us-east-1''' # defaults region
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str
UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
UpperCAmelCase__ : Union[str, Any] = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000}
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ) -> str:
        return F"""{self.framework}-transformers-test"""
@property
def snake_case_ ( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCamelCase ( request : Any ) -> Union[str, Any]:
    UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
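# Sketch of how SageMaker consumes the metric_definitions above: the first
# capture group of a regex that matches a training-log line becomes the
# reported metric value (the log line below is illustrative).
import re

_line = 'eval_accuracy = 0.8417'
_match = re.search(r'eval_accuracy.*=\D*(.*?)$', _line)
assert _match is not None and float(_match.group(1)) == 0.8417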
| 40 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('''KEY''')
__UpperCAmelCase = TypeVar('''VAL''')
@dataclass(frozen=a__ , slots=a__ )
class lowerCAmelCase_ ( Generic[KEY, VAL] ):
UpperCAmelCase__ : KEY
UpperCAmelCase__ : VAL
class lowerCAmelCase_ ( _Item ):
def __init__( self ) -> None:
        super().__init__(None, None )
def __bool__( self ) -> bool:
return False
__UpperCAmelCase = _DeletedItem()
class lowerCAmelCase_ ( MutableMapping[KEY, VAL] ):
def __init__( self, SCREAMING_SNAKE_CASE_ = 8, SCREAMING_SNAKE_CASE_ = 0.75 ) -> None:
UpperCamelCase : Union[str, Any] = initial_block_size
UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
UpperCamelCase : List[str] = capacity_factor
UpperCamelCase : Tuple = 0
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return hash(SCREAMING_SNAKE_CASE_ ) % len(self._buckets )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return (ind + 1) % len(self._buckets )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> bool:
UpperCamelCase : Tuple = self._buckets[ind]
if not stored:
UpperCamelCase : Optional[int] = _Item(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self._len += 1
return True
elif stored.key == key:
UpperCamelCase : Optional[int] = _Item(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return True
else:
return False
def snake_case_ ( self ) -> bool:
UpperCamelCase : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
UpperCamelCase : Union[str, Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : List[str] = self._buckets
UpperCamelCase : str = [None] * new_size
UpperCamelCase : Union[str, Any] = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def snake_case_ ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def snake_case_ ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Iterator[int]:
UpperCamelCase : List[str] = self._get_bucket_index(SCREAMING_SNAKE_CASE_ )
for _ in range(len(self._buckets ) ):
yield ind
UpperCamelCase : Any = self._get_next_ind(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE_ ):
if self._try_set(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
break
def __setitem__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __delitem__( self, SCREAMING_SNAKE_CASE_ ) -> None:
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE_ )
if item is _deleted:
continue
if item.key == key:
UpperCamelCase : Optional[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self, SCREAMING_SNAKE_CASE_ ) -> VAL:
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Dict = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE_ )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
        UpperCamelCase : Optional[int] = ', '.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})"""
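# Self-contained sketch of the linear-probing lookup implemented above: start
# at hash(key) % size and step forward one slot (wrapping around) until the
# key or a truly empty slot is found; tombstones left by deletions are skipped
# so probe chains stay intact.
def lookup(buckets, key, tombstone):
    size = len(buckets)
    ind = hash(key) % size
    for _ in range(size):
        item = buckets[ind]
        if item is None:
            break
        if item is not tombstone and item[0] == key:
            return item[1]
        ind = (ind + 1) % size
    raise KeyError(key)

_TOMB = object()
_buckets = [None] * 8
_buckets[hash('x') % 8] = ('x', 42)
assert lookup(_buckets, 'x', _TOMB) == 42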
| 40 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCAmelCase = '''src/transformers'''
__UpperCAmelCase = '''docs/source/en/tasks'''
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
UpperCamelCase : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
UpperCamelCase : Optional[Any] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
UpperCamelCase : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
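# Sketch of the markdown rendering above: each (model_type, display_name)
# pair becomes a "[Name](../model_doc/type)" link, and the links are joined
# with commas (the model names here are illustrative).
_names = {'bert': 'BERT', 'gpt2': 'OpenAI GPT-2'}
print(', '.join(f'[{name}](../model_doc/{code})' for code, name in _names.items()))
# -> [BERT](../model_doc/bert), [OpenAI GPT-2](../model_doc/gpt2)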
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__UpperCAmelCase = 500_000
__UpperCAmelCase , __UpperCAmelCase = os.path.split(__file__)
__UpperCAmelCase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def UpperCamelCase ( snake_case__ : datasets.Dataset , **snake_case__ : Any ) -> Optional[int]:
UpperCamelCase : Union[str, Any] = dataset.map(**snake_case__ )
@get_duration
def UpperCamelCase ( snake_case__ : datasets.Dataset , **snake_case__ : Union[str, Any] ) -> Union[str, Any]:
UpperCamelCase : List[str] = dataset.filter(**snake_case__ )
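# Hypothetical sketch of the get_duration decorator imported from the local
# utils module above (the real implementation may differ): it times the
# wrapped call and returns the elapsed seconds instead of the function's
# result.
import functools
import time

def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper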
def UpperCamelCase ( ) -> List[Any]:
UpperCamelCase : Any = {'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase : Optional[Any] = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
UpperCamelCase : Tuple = generate_example_dataset(
os.path.join(snake_case__ , 'dataset.arrow' ) , snake_case__ , num_examples=snake_case__ )
UpperCamelCase : str = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=snake_case__ )
    def tokenize(examples ):
        return tokenizer(examples['text'] )
UpperCamelCase : Union[str, Any] = map(snake_case__ )
UpperCamelCase : List[Any] = map(snake_case__ , batched=snake_case__ )
UpperCamelCase : str = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type='numpy' ):
UpperCamelCase : Dict = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type='pandas' ):
UpperCamelCase : int = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type='torch' , columns='numbers' ):
UpperCamelCase : str = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
UpperCamelCase : Optional[int] = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
UpperCamelCase : str = map(snake_case__ , function=snake_case__ , batched=snake_case__ )
UpperCamelCase : Any = filter(snake_case__ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(snake_case__ , 'wb' ) as f:
f.write(json.dumps(snake_case__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 40 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : int = IFPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ) -> str:
return self._get_dummy_components()
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def snake_case_ ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )
def snake_case_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_local()
def snake_case_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def snake_case_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
# if
        UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16 )
        UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
        UpperCamelCase : Optional[int] = IFImg2ImgPipeline(**pipe_a.components )
        UpperCamelCase : List[Any] = IFImg2ImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_img2img(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def _start_torch_memory_measurement() -> None:
    # Reset the CUDA allocator statistics so that subsequent calls to
    # torch.cuda.max_memory_allocated() report the peak usage of the
    # pipeline under test only.
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
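# A minimal sketch (an assumed helper, not part of the original test suite)
# of the peak-memory assertion pattern used above: reset the allocator
# statistics, run the workload, then check the recorded high-water mark
# against a byte budget.
def run_with_memory_budget(fn, budget_bytes):
    _start_torch_memory_measurement()
    result = fn()
    peak_bytes = torch.cuda.max_memory_allocated()
    assert peak_bytes < budget_bytes, f"peak {peak_bytes} B exceeds budget {budget_bytes} B"
    return result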
| 40 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings, layer norms and the pooler are never pruned
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            # Task-specific heads are kept as-is
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                log_alpha = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch parameters used during L0 training
                l, r = -0.1, 1.1
                s = torch.sigmoid(log_alpha)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError('Unknown pruning method')
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}")
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
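# A minimal sketch (hypothetical paths; assuming this script is saved as
# bertarize.py) of how it is invoked once a model has been fine-pruned with
# movement pruning:
#
#   python bertarize.py \
#       --pruning_method topK \
#       --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/topk_finepruned_model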
| 40 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    # Create a unique file path inside a fresh temporary directory.
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))
    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix='.wav')
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_string(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_image(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class lowerCAmelCase_ ( unittest.TestCase ):
    def test_from_string(self):
        string = 'Hey!'
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
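# A minimal sketch (assumed usage) of the round-trip the audio tests above
# exercise: an AgentAudio wraps a waveform tensor, serialises it to a .wav
# file on to_string(), and can be rebuilt from that path.
#
#   tensor = torch.rand(12, dtype=torch.float32) - 0.5
#   agent_audio = AgentAudio(tensor)
#   path = agent_audio.to_string()         # writes a .wav file, returns its path
#   restored = AgentAudio(path).to_raw()   # reads it back as a tensor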
| 40 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Any = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step, 3 )
self.assertEqual(len(accumulator.gradients ), 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step, 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2 )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : List[Any] = None
ops.enable_eager_execution_internal()
UpperCamelCase : Any = tf.config.list_physical_devices('CPU' )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
UpperCamelCase : Union[str, Any] = tf.config.list_logical_devices(device_type='CPU' )
UpperCamelCase : int = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
UpperCamelCase : Optional[int] = GradientAccumulator()
UpperCamelCase : Tuple = tf.Variable([4.0, 3.0] )
UpperCamelCase , UpperCamelCase : int = create_optimizer(5e-5, 10, 5 )
UpperCamelCase : List[Any] = tf.Variable([0.0, 0.0], trainable=SCREAMING_SNAKE_CASE_ )
def accumulate_on_replica(SCREAMING_SNAKE_CASE_ ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) )
@tf.function
def accumulate(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
with strategy.scope():
UpperCamelCase : Tuple = strategy.experimental_local_results(SCREAMING_SNAKE_CASE_ )
local_variables[0].assign(SCREAMING_SNAKE_CASE_ )
local_variables[1].assign(SCREAMING_SNAKE_CASE_ )
strategy.run(SCREAMING_SNAKE_CASE_, args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(SCREAMING_SNAKE_CASE_ )
def _check_local_values(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value(), SCREAMING_SNAKE_CASE_, tol=1e-2 )
self.assertListAlmostEqual(values[1].value(), SCREAMING_SNAKE_CASE_, tol=1e-2 )
accumulate([1.0, 2.0], [-1.0, 1.0] )
accumulate([3.0, -1.0], [-1.0, -1.0] )
accumulate([-2.0, 2.0], [3.0, -2.0] )
self.assertEqual(accumulator.step, 3 )
_check_local_values([2.0, 3.0], [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step, 0 )
_check_local_values([0.0, 0.0], [0.0, 0.0] )
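# A minimal sketch (assumed usage) of the accumulator exercised above:
# per-micro-batch gradients are summed across calls and applied once per
# effective batch, after which the buffers are cleared.
#
#   accumulator = GradientAccumulator()
#   for grads in micro_batch_gradients:  # list of per-variable gradient lists
#       accumulator(grads)
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()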
| 40 |
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 5)
    [0, 2, 4, 1, 3]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Build the permutation digit by digit using the factorial number system
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__UpperCAmelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True)
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True)
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # both runs must produce identical noisy inputs and predictions
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
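# Why the step-for-step comparison above is meaningful (a short derivation,
# not part of the original file): with identical beta schedules, DDPM and
# DDIM share the same forward-diffusion map
#
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
#
# so add_noise() produces identical noisy batches under identical seeds, and
# the two training loops should end with numerically identical weights.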
| 40 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( a__ ):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'width_multiplier'))
class lowerCAmelCase_ :
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act='swish', conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MobileViTVaModelTester(self )
UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase : Any = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Tuple:
UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = outputs.logits.detach().cpu()
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCamelCase : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
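# A minimal reference sketch (assumed behaviour) of the `make_divisible`
# helper the tester above imports: round a channel count to a multiple of
# `divisor` without dropping more than 10% below the requested value.
def make_divisible_reference(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Ensure rounding down never removes more than 10% of the value
    if new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)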
| 40 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase__ : str = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : str = hf_hub_download(
repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset' )
UpperCamelCase : Optional[Any] = VideoClassificationPipeline(model=SCREAMING_SNAKE_CASE_, image_processor=SCREAMING_SNAKE_CASE_, top_k=2 )
UpperCamelCase : str = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
for example in examples:
UpperCamelCase : Any = video_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{'score': ANY(SCREAMING_SNAKE_CASE_ ), 'label': ANY(SCREAMING_SNAKE_CASE_ )},
{'score': ANY(SCREAMING_SNAKE_CASE_ ), 'label': ANY(SCREAMING_SNAKE_CASE_ )},
], )
@require_torch
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Any = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCamelCase : Tuple = VideoMAEFeatureExtractor(
size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10} )
UpperCamelCase : List[Any] = pipeline(
'video-classification', model=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, frame_sampling_rate=4 )
UpperCamelCase : Optional[int] = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset' )
UpperCamelCase : List[str] = video_classifier(SCREAMING_SNAKE_CASE_, top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_, decimals=4 ), [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], )
UpperCamelCase : List[str] = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_, decimals=4 ), [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
], )
@require_tf
def snake_case_ ( self ) -> str:
pass
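# A minimal sketch (assumed usage) of the pipeline under test, with a public
# Kinetics-400 checkpoint (checkpoint name assumed to be available):
#
#   from transformers import pipeline
#   classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#   predictions = classifier("archery.mp4", top_k=2)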
| 40 |
def longest_distance(graph):
    # Kahn's algorithm variant: topologically process the DAG while tracking,
    # for every vertex, the number of vertices on the longest path ending there.
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
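# Prints 5: the longest path in this DAG visits five vertices,
# e.g. 0 -> 2 -> 5 -> 6 -> 7.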
| 40 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
__UpperCAmelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : int = NllbTokenizer
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else mask_token
UpperCamelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, src_lang=SCREAMING_SNAKE_CASE_, tgt_lang=SCREAMING_SNAKE_CASE_, additional_special_tokens=SCREAMING_SNAKE_CASE_, legacy_behaviour=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Optional[int] = vocab_file
UpperCamelCase : int = False if not self.vocab_file else True
UpperCamelCase : int = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
UpperCamelCase : Optional[int] = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Optional[int] = src_lang if src_lang is not None else 'eng_Latn'
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
UpperCamelCase : str = src_lang
UpperCamelCase : List[str] = self(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = tgt_lang_id
return inputs
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = "eng_Latn", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "fra_Latn", **SCREAMING_SNAKE_CASE_, ) -> BatchEncoding:
UpperCamelCase : Union[str, Any] = src_lang
UpperCamelCase : str = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
if self.legacy_behaviour:
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase : Optional[int] = [self.cur_lang_code]
UpperCamelCase : List[str] = [self.eos_token_id]
UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : List[str] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
if self.legacy_behaviour:
UpperCamelCase : Optional[Any] = []
UpperCamelCase : Any = [self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase : int = [self.cur_lang_code]
UpperCamelCase : Tuple = [self.eos_token_id]
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Tuple = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
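# A minimal sketch (assumed usage) of the tokenizer defined above for
# translation with the public distilled checkpoint; `forced_bos_token_id`
# selects the target language at generation time:
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
#   inputs = tokenizer("Hello, world!", return_tensors="pt")
#   generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))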
| 40 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 1 |
__UpperCAmelCase = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 40 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def UpperCamelCase ( snake_case__ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
UpperCamelCase : str = []
if isinstance(snake_case__ , snake_case__ ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case__ ) )
elif isinstance(snake_case__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case__ ) )
elif isinstance(snake_case__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def UpperCamelCase ( snake_case__ : int , snake_case__ : Tuple[int, ...] ) -> Tuple[int, ...]:
UpperCamelCase : Optional[Any] = []
for d in reversed(snake_case__ ):
idx.append(flat_idx % d )
UpperCamelCase : Optional[Any] = flat_idx // d
return tuple(reversed(snake_case__ ) )
@torch.jit.ignore
def UpperCamelCase ( snake_case__ : Sequence[int] , snake_case__ : Sequence[int] , snake_case__ : Sequence[int] , snake_case__ : Optional[Sequence[bool]] = None , snake_case__ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(snake_case__ : List[bool] ) -> None:
UpperCamelCase : Optional[Any] = True
for i in range(len(snake_case__ ) ):
UpperCamelCase : int = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase : Dict = [s == 0 for s in start]
reduce_edge_list(snake_case__ )
if end_edges is None:
UpperCamelCase : int = [e == (d - 1) for e, d in zip(snake_case__ , snake_case__ )]
reduce_edge_list(snake_case__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case__ ) == 0:
return [()]
elif len(snake_case__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCamelCase : List[Tuple[slice, ...]] = []
UpperCamelCase : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case__ , snake_case__ ):
if s == e:
path_list.append(slice(snake_case__ , s + 1 ) )
else:
break
UpperCamelCase : Tuple[slice, ...] = tuple(snake_case__ )
UpperCamelCase : str = len(snake_case__ )
# start == end, and we're done
if divergence_idx == len(snake_case__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase : Optional[int] = start[divergence_idx]
return tuple(
path + (slice(snake_case__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase : List[Any] = end[divergence_idx]
return tuple(
path + (slice(snake_case__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCamelCase : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def UpperCamelCase ( snake_case__ : torch.Tensor , snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> torch.Tensor:
UpperCamelCase : Union[str, Any] = t.shape[:no_batch_dims]
UpperCamelCase : str = list(_flat_idx_to_idx(snake_case__ , snake_case__ ) )
# _get_minimal_slice_set is inclusive
UpperCamelCase : Optional[Any] = list(_flat_idx_to_idx(flat_end - 1 , snake_case__ ) )
# Get an ordered list of slices to perform
UpperCamelCase : Union[str, Any] = _get_minimal_slice_set(
snake_case__ , snake_case__ , snake_case__ , )
UpperCamelCase : Union[str, Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def UpperCamelCase ( snake_case__ : Callable , snake_case__ : Dict[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : bool = False , snake_case__ : Any = None , snake_case__ : bool = False , ) -> Any:
if not (len(snake_case__ ) > 0):
raise ValueError('Must provide at least one input' )
UpperCamelCase : Optional[int] = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case__ )]
UpperCamelCase : Dict = tuple([max(snake_case__ ) for s in zip(*snake_case__ )] )
def _prep_inputs(snake_case__ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCamelCase : Tuple = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCamelCase : List[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCamelCase : Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCamelCase : Dict[str, Any] = tensor_tree_map(_prep_inputs , snake_case__ )
UpperCamelCase : List[Any] = None
if _out is not None:
UpperCamelCase : List[Any] = tensor_tree_map(lambda snake_case__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCamelCase : List[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase : List[Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(snake_case__ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase : Any = 0
UpperCamelCase : List[Any] = prepped_outputs
for _ in range(snake_case__ ):
# Chunk the input
if not low_mem:
UpperCamelCase : Tuple = _select_chunk
else:
UpperCamelCase : int = partial(
_chunk_slice , flat_start=snake_case__ , flat_end=min(snake_case__ , i + chunk_size ) , no_batch_dims=len(snake_case__ ) , )
UpperCamelCase : Dict[str, Any] = tensor_tree_map(snake_case__ , snake_case__ )
# Run the layer on the chunk
UpperCamelCase : List[Any] = layer(**snake_case__ )
# Allocate space for the output
if out is None:
UpperCamelCase : str = tensor_tree_map(lambda snake_case__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case__ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case__ , snake_case__ ):
def assign(snake_case__ : dict , snake_case__ : dict ) -> None:
for k, v in da.items():
if isinstance(snake_case__ , snake_case__ ):
assign(snake_case__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase : Optional[Any] = da[k]
assign(snake_case__ , snake_case__ )
elif isinstance(snake_case__ , snake_case__ ):
for xa, xa in zip(snake_case__ , snake_case__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase : str = xa
elif isinstance(snake_case__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase : Optional[Any] = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
UpperCamelCase : Tuple = tensor_tree_map(lambda snake_case__ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case__ )
return out
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_ = 512, ) -> List[Any]:
UpperCamelCase : int = max_chunk_size
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[tuple] = None
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2 ) ) + 1 )]
UpperCamelCase : Tuple = [c for c in candidates if c > min_chunk_size]
UpperCamelCase : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
        def test_chunk_size(chunk_size ) -> bool:
            try:
                with torch.no_grad():
                    fn(*SCREAMING_SNAKE_CASE_, chunk_size=chunk_size )
return True
except RuntimeError:
return False
UpperCamelCase : str = 0
UpperCamelCase : Dict = len(SCREAMING_SNAKE_CASE_ ) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase : Optional[Any] = test_chunk_size(candidates[i] )
if not viable:
UpperCamelCase : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase : Union[str, Any] = i
UpperCamelCase : str = (i + len(SCREAMING_SNAKE_CASE_ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> bool:
UpperCamelCase : Any = True
        for a1, a2 in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
            assert type(a1 ) == type(a2 )
            if isinstance(a1, (list, tuple) ):
                consistent &= self._compare_arg_caches(a1, a2 )
            elif isinstance(a1, dict ):
                UpperCamelCase : Optional[int] = [v for _, v in sorted(a1.items(), key=lambda x : x[0] )]
                UpperCamelCase : Union[str, Any] = [v for _, v in sorted(a2.items(), key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(a1_items, a2_items )
            else:
                consistent &= a1 == a2
return consistent
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> int:
UpperCamelCase : int = True
        UpperCamelCase : tuple = tree_map(lambda a : a.shape if isinstance(a, torch.Tensor ) else a, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = self._compare_arg_caches(self.cached_arg_data, SCREAMING_SNAKE_CASE_ )
        else:
            # No cache yet, so we have to tune from scratch
            UpperCamelCase : Tuple = False
if not consistent:
UpperCamelCase : Union[str, Any] = self._determine_favorable_chunk_size(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
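# A simpler linear-scan variant of the bisection the tuner above performs:
# probe power-of-two chunk sizes and keep the largest one whose trial run does
# not raise RuntimeError (e.g. CUDA OOM). `run_with_chunks` is an assumed
# callable, not part of this module.
def largest_viable_chunk(run_with_chunks: Callable, max_chunk_size: int = 512) -> int:
    candidates = [2**l for l in range(int(math.log2(max_chunk_size)) + 1)]
    viable = candidates[0]
    for c in candidates:
        try:
            with torch.no_grad():
                run_with_chunks(chunk_size=c)
            viable = c  # this size fit; try the next power of two
        except RuntimeError:
            break
    return viable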
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
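# The import structure above defers the heavy framework imports until first
# attribute access. A minimal sketch of the same lazy-module idea, independent
# of transformers' _LazyModule (the class name here is illustrative):
import importlib

class LazyModule:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr):
        # Import the real module only when an attribute is first requested
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)

# e.g. torch_lazy = LazyModule('torch'); torch_lazy.ones(2) triggers the import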
| 40 | 1 |
from math import isqrt
def UpperCamelCase ( snake_case__ : int ) -> list[int]:
UpperCamelCase : Union[str, Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , max_number , i ):
UpperCamelCase : List[str] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def UpperCamelCase ( snake_case__ : int = 10**8 ) -> int:
UpperCamelCase : Union[str, Any] = calculate_prime_numbers(max_number // 2 )
UpperCamelCase : List[Any] = 0
UpperCamelCase : List[Any] = 0
UpperCamelCase : Tuple = len(snake_case__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
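# Naive cross-check for the two-pointer count above, kept self-contained so it
# does not depend on the helper names in this file; it counts M = p * q <
# max_number with p <= q both prime (Project Euler 187's definition):
def count_two_prime_factor_composites(max_number: int) -> int:
    def is_prime(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, isqrt(n) + 1))

    primes = [p for p in range(2, max_number // 2 + 1) if is_prime(p)]
    return sum(1 for i, p in enumerate(primes) for q in primes[i:] if p * q < max_number)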
if __name__ == "__main__":
print(F"""{solution() = }""")
| 40 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
    def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=16_000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
            UpperCamelCase : str = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        UpperCamelCase : Dict = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[1][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase : Dict = np.random.rand(100 ).astype(np.float64 )
        UpperCamelCase : Dict = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
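# Minimal numpy sketch of the zero-mean / unit-variance normalization that the
# assertions above verify, normalizing only the valid prefix so padded samples
# stay at zero. The function name is illustrative, not part of the API.
def zero_mean_unit_var(x, valid_length=None):
    x = np.asarray(x, dtype=np.float32)
    n = len(x) if valid_length is None else valid_length
    prefix = (x[:n] - x[:n].mean()) / np.sqrt(x[:n].var() + 1e-7)
    return np.concatenate([prefix, np.zeros_like(x[n:])])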
| 40 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ) -> Dict:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Union[str, Any]=True ) -> Tuple:
model.train()
UpperCamelCase : Union[str, Any] = model(snake_case__ )
UpperCamelCase : Optional[Any] = F.mse_loss(snake_case__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Any=False ) -> List[str]:
set_seed(42 )
UpperCamelCase : Tuple = RegressionModel()
UpperCamelCase : Dict = deepcopy(snake_case__ )
UpperCamelCase : Tuple = RegressionDataset(length=80 )
UpperCamelCase : Dict = DataLoader(snake_case__ , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCamelCase : str = AdamW(params=model.parameters() , lr=1E-3 )
UpperCamelCase : Dict = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        UpperCamelCase : Optional[int] = LambdaLR(snake_case__ , lr_lambda=lambda epoch : epoch**0.65 )
        UpperCamelCase : int = LambdaLR(snake_case__ , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = accelerator.prepare(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
UpperCamelCase , UpperCamelCase : List[str] = accelerator.prepare(snake_case__ , snake_case__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def UpperCamelCase ( snake_case__ : str ) -> Optional[int]:
# Test when on a single CPU or GPU that the context manager does nothing
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = get_training_setup(snake_case__ )
# Use a single batch
UpperCamelCase , UpperCamelCase : Any = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase : Tuple = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCamelCase : str = ddp_input[torch.randperm(len(snake_case__ ) )]
def UpperCamelCase ( snake_case__ : int ) -> Tuple:
# Test on distributed setup that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = get_training_setup(snake_case__ )
# Use a single batch
UpperCamelCase , UpperCamelCase : Dict = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase : int = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCamelCase : str = ddp_input[torch.randperm(len(snake_case__ ) )]
def UpperCamelCase ( snake_case__ : Optional[int]=False , snake_case__ : Dict=False ) -> Tuple:
UpperCamelCase : Tuple = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = get_training_setup(snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
UpperCamelCase , UpperCamelCase : Optional[int] = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCamelCase : Any = ddp_input[torch.randperm(len(snake_case__ ) )]
GradientState._reset_state()
def UpperCamelCase ( snake_case__ : Optional[int]=False , snake_case__ : List[Any]=False ) -> Union[str, Any]:
UpperCamelCase : List[Any] = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = get_training_setup(snake_case__ , snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
UpperCamelCase , UpperCamelCase : List[str] = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCamelCase : int = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def UpperCamelCase ( ) -> Optional[int]:
UpperCamelCase : Any = Accelerator()
UpperCamelCase : int = RegressionDataset(length=80 )
UpperCamelCase : Dict = DataLoader(snake_case__ , batch_size=16 )
UpperCamelCase : Optional[Any] = RegressionDataset(length=96 )
UpperCamelCase : List[str] = DataLoader(snake_case__ , batch_size=16 )
UpperCamelCase , UpperCamelCase : List[Any] = accelerator.prepare(snake_case__ , snake_case__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if iteration < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if batch_num < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def UpperCamelCase ( ) -> int:
UpperCamelCase : str = Accelerator()
UpperCamelCase : List[Any] = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(snake_case__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(snake_case__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(snake_case__ , snake_case__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[Any] ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
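# Minimal runnable sketch of the accumulate() pattern the tests above exercise,
# reusing the regression helpers imported at the top of this file; the batch
# keys 'x'/'y' are an assumption about RegressionDataset's item format.
def accumulate_demo(num_steps=2):
    accelerator = Accelerator(gradient_accumulation_steps=num_steps)
    model = RegressionModel()
    optimizer = AdamW(params=model.parameters(), lr=1e-3)
    dataloader = DataLoader(RegressionDataset(length=32), batch_size=8)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        with accelerator.accumulate(model):
            # Gradients are synchronized and applied only every `num_steps` batches
            loss = F.mse_loss(model(batch['x']), batch['y'])
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()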
| 40 |
def UpperCamelCase ( snake_case__ : int ) -> str:
    if isinstance(num , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(num , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
def UpperCamelCase ( snake_case__ : int ) -> str:
    if isinstance(num , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(num , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
        UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
            if value.dtype == torch.int8:
UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
            if is_8bit:
                UpperCamelCase : Optional[Any] = bnb.nn.Int8Params(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
            elif is_4bit:
                UpperCamelCase : Optional[Any] = bnb.nn.Params4bit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
UpperCamelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        UpperCamelCase : Any = bnb.nn.Linear8bitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
                            UpperCamelCase : str = bnb.nn.Linear4bit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
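# Hedged usage sketch for the helpers above (the last helper's original name is
# get_keys_to_not_convert; the model choice is illustrative, and this mirrors
# how transformers wires these internals together rather than a public API):
#
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
# quantization_config = BitsAndBytesConfig(load_in_8bit=True)
# model = AutoModelForCausalLM.from_pretrained('gpt2')
# keep_in_fp32 = get_keys_to_not_convert(model)
# model = replace_with_bnb_linear(
#     model, modules_to_not_convert=keep_in_fp32, quantization_config=quantization_config
# )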
| 40 | 1 |
def UpperCamelCase ( snake_case__ : list ) -> list:
if len(snake_case__ ) <= 1:
return lst
UpperCamelCase : str = 1
while i < len(snake_case__ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCamelCase , UpperCamelCase : Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
UpperCamelCase : List[str] = 1
return lst
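# Quick sanity checks for the sort above (the module-level code below calls it
# by its original name, gnome_sort); worst case is O(n^2) swaps, best case O(n)
# for already-sorted input:
# assert gnome_sort([3, 1, 2]) == [1, 2, 3]
# assert gnome_sort([]) == []
# assert gnome_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]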
if __name__ == "__main__":
__UpperCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
| 40 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
        '\n header1,header2\n 1,2\n 10,20\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
        '\n header1,header2\n 1,2\n 10,20,\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
        '\n label\n good\n bad\n good\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
        '\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
    UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
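# Standalone sketch of the converters behaviour the last test relies on: the
# `converters` mapping is forwarded to pandas.read_csv, which applies each
# callable per column before Arrow conversion. Names here are illustrative.
def int_list_converter_demo():
    import io

    import pandas as pd

    df = pd.read_csv(
        io.StringIO('int_list\n1 2 3\n4 5 6\n'),
        converters={'int_list': lambda x: [int(i) for i in x.split()]},
    )
    return df['int_list'].tolist()  # [[1, 2, 3], [4, 5, 6]]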
| 40 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
        if isinstance(column, list ) and column:
            if all(
                isinstance(x, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
        if isinstance(value, (str, bytes, type(None )) ):
            return value
        elif isinstance(value, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
            return value.tolist()
        UpperCamelCase : str = {}
        if isinstance(value, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
            UpperCamelCase : List[str] = {'dtype': torch.int64}
        elif isinstance(value, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
            UpperCamelCase : int = {'dtype': torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
            if isinstance(value, PIL.Image.Image ):
                UpperCamelCase : str = np.asarray(value )
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
        if hasattr(data_struct, '__array__' ) and not isinstance(data_struct, torch.Tensor ):
            UpperCamelCase : Union[str, Any] = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct, (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
        return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=False )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
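# Standalone sketch of the recursive tensorization above: walk nested dicts and
# lists, tensorize the leaves, and stack homogeneous lists of tensors the way
# _consolidate does. `to_torch` is an illustrative name.
def to_torch(data):
    import torch

    if isinstance(data, dict):
        return {k: to_torch(v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        converted = [to_torch(v) for v in data]
        if converted and all(
            isinstance(t, torch.Tensor) and t.shape == converted[0].shape and t.dtype == converted[0].dtype
            for t in converted
        ):
            return torch.stack(converted)
        return converted
    return torch.tensor(data)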
| 40 |
import math
import random
def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
__UpperCAmelCase = 0.02
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCamelCase : int = (expected / 100) - layer_a
# Error delta
        UpperCamelCase : List[str] = layer_1_error * sigmoid_function(layer_a , True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
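# The delta above uses the logistic identity sigma'(z) = sigma(z) * (1 - sigma(z)),
# which is why the derivative branch receives the *activation* layer_a rather
# than the pre-activation. Numeric check (values rounded):
# s = sigmoid_function(0.5)   # ~0.6225
# sigmoid_function(s, True)   # ~0.2350 == s * (1 - s)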
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input('''Expected value: '''))
__UpperCAmelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 1 |
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase : List[Any] = name
UpperCamelCase : Tuple = val
def __str__( self ) -> List[str]:
return F"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self, SCREAMING_SNAKE_CASE_ ) -> int:
return self.val < other.val
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : List[Any] = {}
UpperCamelCase : Dict = {}
UpperCamelCase : Optional[int] = self.build_heap(SCREAMING_SNAKE_CASE_ )
def __getitem__( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.get_value(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return (idx - 1) // 2
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
return idx * 2 + 1
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return idx * 2 + 2
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return self.heap_dict[key]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
UpperCamelCase : Optional[Any] = self.get_parent_idx(SCREAMING_SNAKE_CASE_ )
for idx, i in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = idx
UpperCamelCase : Dict = i.val
for i in range(SCREAMING_SNAKE_CASE_, -1, -1 ):
self.sift_down(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return array
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
while True:
UpperCamelCase : Optional[int] = self.get_left_child_idx(SCREAMING_SNAKE_CASE_ ) # noqa: E741
UpperCamelCase : Union[str, Any] = self.get_right_child_idx(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = idx
if l < len(SCREAMING_SNAKE_CASE_ ) and array[l] < array[idx]:
UpperCamelCase : Tuple = l
if r < len(SCREAMING_SNAKE_CASE_ ) and array[r] < array[smallest]:
UpperCamelCase : Union[str, Any] = r
if smallest != idx:
UpperCamelCase , UpperCamelCase : Any = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
UpperCamelCase : Union[str, Any] = smallest
else:
break
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Union[str, Any] = self.get_parent_idx(SCREAMING_SNAKE_CASE_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCamelCase , UpperCamelCase : int = self.heap[idx], self.heap[p]
UpperCamelCase , UpperCamelCase : Optional[int] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCamelCase : int = p
UpperCamelCase : Optional[Any] = self.get_parent_idx(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
return self.heap[0]
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.heap[-1], self.heap[0]
UpperCamelCase , UpperCamelCase : Union[str, Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCamelCase : Union[str, Any] = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0, self.heap )
return x
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
self.heap.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = len(self.heap ) - 1
UpperCamelCase : Dict = node.val
self.sift_up(len(self.heap ) - 1 )
def snake_case_ ( self ) -> Union[str, Any]:
return len(self.heap ) == 0
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
UpperCamelCase : str = new_value
UpperCamelCase : Tuple = new_value
self.sift_up(self.idx_of_element[node] )
__UpperCAmelCase = Node('''R''', -1)
__UpperCAmelCase = Node('''B''', 6)
__UpperCAmelCase = Node('''A''', 3)
__UpperCAmelCase = Node('''X''', 1)
__UpperCAmelCase = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__UpperCAmelCase = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
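# After decrease_key, node B holds the smallest val, so it bubbles to the root
# and would be popped first. A hedged usage sketch (method names assumed from
# the structure above):
# while not my_min_heap.is_empty():
#     print(my_min_heap.remove())   # nodes in ascending val order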
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self) -> dict:
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 40 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized (untrained) model built from the given config, plus its tokenizer."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
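# Illustrative invocation (script name is hypothetical; any seq2seq config works):
#   python save_randomly_initialized_model.py t5-small /tmp/t5-small-random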
| 40 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
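    # Illustration (not part of the original file): with fairseq_offset = 1, the
    # first real spm piece "," (spm id 3) maps to fairseq id 3 + 1 = 4, matching
    # the alignment table in __init__ above.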
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
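    # Usage sketch (illustrative): build_inputs_with_special_tokens prefixes a
    # single sequence with </s>, so encoding "Hello" with this tokenizer yields
    # input_ids starting with the separator id 2 from the fairseq alignment.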
| 40 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
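# Illustration (not in the original script): called as
# `python utils/get_modified_files.py utils src`, joined_dirs is "utils|src" and
# the regex matches "src/foo/bar.py" but not "docs/readme.md".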
| 40 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. it includes the space
        # before it, so lstrip is set to True
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
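    # Illustration (derived from the methods above): a single sequence is wrapped
    # as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`, matching RoBERTa's
    # pretraining format; token type ids are all zeros in both cases.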
| 40 | 1 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
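# Sanity-check sketch (illustrative): with drop_prob=0.5 and training=True, each
# sample row is independently zeroed with probability 0.5 and survivors are
# rescaled by 1 / keep_prob, so drop_path(torch.ones(4, 3), 0.5, training=True)
# yields rows that are all 0.0 or all 2.0, keeping the expected output equal to
# the input.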
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings from pixel values or intermediate feature maps."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor of shape [batch_size, channels, height, width]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input implements pooling as token mixing: only the
        # difference from the local average is propagated.
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
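# Minimal inference sketch (illustrative; `image` is a placeholder PIL image):
#
#   from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   logits = model(**processor(images=image, return_tensors="pt")).logits
#   # logits.argmax(-1) indexes into model.config.id2label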
| 40 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
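# Usage note (illustrative): `Dataset.set_format("torch")` routes through this
# formatter, so integer columns come back as torch.int64 tensors and floating
# columns as torch.float32 unless torch_tensor_kwargs overrides the dtype.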
| 40 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
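    # Note (illustrative): "readapt" splits into "re@@ adapt" because the
    # "ad apt</w>" merge only fires word-finally, while the unseen "react"
    # decomposes into "re@@ a@@ c@@ t" using the remaining merges.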
| 40 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each vector in value_array, find the closest vector in dataset and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
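# Worked example (a sketch): with dataset = np.array([[0, 0], [1, 1]]) and
# value_array = np.array([[2, 1]]), the nearest vector to [2, 1] is [1, 1] at
# euclidean distance 1.0, so similarity_search returns [[[1, 1], 1.0]].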
| 40 | 1 |