code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
from collections.abc import Generator
def __SCREAMING_SNAKE_CASE ( ) -> Generator[int, None, None]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : List[Any] = 0, 1
while True:
__UpperCAmelCase , __UpperCAmelCase : int = b, a + b
yield b
def __SCREAMING_SNAKE_CASE ( lowercase_ = 1000 ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : Union[str, Any] = fibonacci_generator()
while len(str(next(lowercase_ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 462 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowerCAmelCase = random.Random()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=1.0 , lowercase_=None , lowercase_=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
__UpperCAmelCase : str = global_rng
__UpperCAmelCase : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=4_0_0 , lowercase__=2_0_0_0 , lowercase__=2_0_4_8 , lowercase__=1_2_8 , lowercase__=1 , lowercase__=5_1_2 , lowercase__=3_0 , lowercase__=4_4_1_0_0 , ):
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : int = min_seq_length
__UpperCAmelCase : List[str] = max_seq_length
__UpperCAmelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCAmelCase : Any = spectrogram_length
__UpperCAmelCase : List[Any] = feature_size
__UpperCAmelCase : Union[str, Any] = num_audio_channels
__UpperCAmelCase : Optional[int] = hop_length
__UpperCAmelCase : Tuple = chunk_length
__UpperCAmelCase : Any = sampling_rate
def A( self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def A( self , lowercase__=False , lowercase__=False):
def _flatten(lowercase__):
return list(itertools.chain(*lowercase__))
if equal_length:
__UpperCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
__UpperCAmelCase : List[str] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
__UpperCAmelCase : List[str] = [np.asarray(lowercase__) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[int] = TvltFeatureExtractor
def A( self):
__UpperCAmelCase : Dict = TvltFeatureExtractionTester(self)
def A( self):
__UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowercase__ , '''spectrogram_length'''))
self.assertTrue(hasattr(lowercase__ , '''feature_size'''))
self.assertTrue(hasattr(lowercase__ , '''num_audio_channels'''))
self.assertTrue(hasattr(lowercase__ , '''hop_length'''))
self.assertTrue(hasattr(lowercase__ , '''chunk_length'''))
self.assertTrue(hasattr(lowercase__ , '''sampling_rate'''))
def A( self):
__UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : str = feat_extract_first.save_pretrained(lowercase__)[0]
check_json_file_has_correct_format(lowercase__)
__UpperCAmelCase : Optional[int] = self.feature_extraction_class.from_pretrained(lowercase__)
__UpperCAmelCase : List[Any] = feat_extract_first.to_dict()
__UpperCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
__UpperCAmelCase : Union[str, Any] = dict_first.pop('''mel_filters''')
__UpperCAmelCase : Union[str, Any] = dict_second.pop('''mel_filters''')
self.assertTrue(np.allclose(lowercase__ , lowercase__))
self.assertEqual(lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Union[str, Any] = os.path.join(lowercase__ , '''feat_extract.json''')
feat_extract_first.to_json_file(lowercase__)
__UpperCAmelCase : str = self.feature_extraction_class.from_json_file(lowercase__)
__UpperCAmelCase : Any = feat_extract_first.to_dict()
__UpperCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
__UpperCAmelCase : Tuple = dict_first.pop('''mel_filters''')
__UpperCAmelCase : List[str] = dict_second.pop('''mel_filters''')
self.assertTrue(np.allclose(lowercase__ , lowercase__))
self.assertEqual(lowercase__ , lowercase__)
def A( self):
# Initialize feature_extractor
__UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase : Optional[int] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
__UpperCAmelCase : int = [np.asarray(lowercase__) for speech_input in speech_inputs]
# Test not batched input
__UpperCAmelCase : Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
self.assertTrue(encoded_audios.ndim == 4)
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
# Test batched
__UpperCAmelCase : List[str] = feature_extractor(lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
self.assertTrue(encoded_audios.ndim == 4)
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
# Test audio masking
__UpperCAmelCase : Tuple = feature_extractor(
lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=lowercase__).audio_values
self.assertTrue(encoded_audios.ndim == 4)
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
# Test 2-D numpy arrays are batched.
__UpperCAmelCase : Any = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__UpperCAmelCase : Optional[Any] = np.asarray(lowercase__)
__UpperCAmelCase : Tuple = feature_extractor(lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
self.assertTrue(encoded_audios.ndim == 4)
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
def A( self , lowercase__):
__UpperCAmelCase : Optional[int] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''')
# automatic decoding with librispeech
__UpperCAmelCase : int = ds.sort('''id''').select(range(lowercase__))[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def A( self):
__UpperCAmelCase : Optional[Any] = self._load_datasamples(1)
__UpperCAmelCase : Tuple = TvltFeatureExtractor()
__UpperCAmelCase : Tuple = feature_extractor(lowercase__ , return_tensors='''pt''').audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8))
__UpperCAmelCase : int = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]])
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowercase__ , atol=1e-4))
| 462 | 1 |
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[Any]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _SCREAMING_SNAKE_CASE ( a ) -> list[tuple[int, int]]:
__A : Any = 0
__A : Tuple = len(a ) # No of vertices in graph
__A : int = [0] * n
__A : Tuple = [False] * n
def dfs(a , a , a , a ):
__A : Union[str, Any] = True
__A : List[str] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(a , a , a , id_ )
__A : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
__A : str = min(low[at] , low[to] )
__A : list[tuple[int, int]] = []
for i in range(a ):
if not visited[i]:
dfs(a , -1 , a , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Any = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase : Optional[int] = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
UpperCAmelCase : List[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = VOCAB_FILES_NAMES
UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Dict = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[str] = BertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
__A : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _A ) != do_lower_case
or normalizer_state.get('strip_accents' , _A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars
):
__A : Any = getattr(_A , normalizer_state.pop('type' ) )
__A : Union[str, Any] = do_lower_case
__A : Optional[int] = strip_accents
__A : List[Any] = tokenize_chinese_chars
__A : int = normalizer_class(**_A )
__A : Union[str, Any] = do_lower_case
def UpperCAmelCase_ ( self , _A , _A=None ):
__A : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Optional[Any] = [self.sep_token_id]
__A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : int = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 77 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = 42
__UpperCAmelCase = 42
class __lowercase ( nn.Module ):
__UpperCAmelCase = 42
__UpperCAmelCase = (16, 32, 96, 256)
__UpperCAmelCase = jnp.floataa
def _a ( self) -> int:
__snake_case = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case = []
for i in range(len(self.block_out_channels) - 1):
__snake_case = self.block_out_channels[i]
__snake_case = self.block_out_channels[i + 1]
__snake_case = nn.Conv(
lowercase_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowercase_)
__snake_case = nn.Conv(
lowercase_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowercase_)
__snake_case = blocks
__snake_case = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , lowercase_) -> str:
__snake_case = self.conv_in(lowercase_)
__snake_case = nn.silu(lowercase_)
for block in self.blocks:
__snake_case = block(lowercase_)
__snake_case = nn.silu(lowercase_)
__snake_case = self.conv_out(lowercase_)
return embedding
@flax_register_to_config
class __lowercase ( nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase = 32
__UpperCAmelCase = 4
__UpperCAmelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__UpperCAmelCase = False
__UpperCAmelCase = (320, 640, 1_280, 1_280)
__UpperCAmelCase = 2
__UpperCAmelCase = 8
__UpperCAmelCase = None
__UpperCAmelCase = 1_280
__UpperCAmelCase = 0.0
__UpperCAmelCase = False
__UpperCAmelCase = jnp.floataa
__UpperCAmelCase = True
__UpperCAmelCase = 0
__UpperCAmelCase = "rgb"
__UpperCAmelCase = (16, 32, 96, 256)
def _a ( self , lowercase_) -> FrozenDict:
# init input tensors
__snake_case = (1, self.in_channels, self.sample_size, self.sample_size)
__snake_case = jnp.zeros(lowercase_ , dtype=jnp.floataa)
__snake_case = jnp.ones((1,) , dtype=jnp.intaa)
__snake_case = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa)
__snake_case = (1, 3, self.sample_size * 8, self.sample_size * 8)
__snake_case = jnp.zeros(lowercase_ , dtype=jnp.floataa)
__snake_case , __snake_case = jax.random.split(lowercase_)
__snake_case = {'params': params_rng, 'dropout': dropout_rng}
return self.init(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)["params"]
def _a ( self) -> Tuple:
__snake_case = self.block_out_channels
__snake_case = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__snake_case = self.num_attention_heads or self.attention_head_dim
# input
__snake_case = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__snake_case = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
__snake_case = FlaxTimestepEmbedding(lowercase_ , dtype=self.dtype)
__snake_case = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__snake_case = self.only_cross_attention
if isinstance(lowercase_ , lowercase_):
__snake_case = (only_cross_attention,) * len(self.down_block_types)
if isinstance(lowercase_ , lowercase_):
__snake_case = (num_attention_heads,) * len(self.down_block_types)
# down
__snake_case = []
__snake_case = []
__snake_case = block_out_channels[0]
__snake_case = nn.Conv(
lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowercase_)
for i, down_block_type in enumerate(self.down_block_types):
__snake_case = output_channel
__snake_case = block_out_channels[i]
__snake_case = i == len(lowercase_) - 1
if down_block_type == "CrossAttnDownBlock2D":
__snake_case = FlaxCrossAttnDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__snake_case = FlaxDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase_)
for _ in range(self.layers_per_block):
__snake_case = nn.Conv(
lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowercase_)
if not is_final_block:
__snake_case = nn.Conv(
lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowercase_)
__snake_case = down_blocks
__snake_case = controlnet_down_blocks
# mid
__snake_case = block_out_channels[-1]
__snake_case = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__snake_case = nn.Conv(
lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1.0 , lowercase_ = True , lowercase_ = False , ) -> Union[FlaxControlNetOutput, Tuple]:
__snake_case = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__snake_case = jnp.flip(lowercase_ , axis=1)
# 1. time
if not isinstance(lowercase_ , jnp.ndarray):
__snake_case = jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(lowercase_ , jnp.ndarray) and len(timesteps.shape) == 0:
__snake_case = timesteps.astype(dtype=jnp.floataa)
__snake_case = jnp.expand_dims(lowercase_ , 0)
__snake_case = self.time_proj(lowercase_)
__snake_case = self.time_embedding(lowercase_)
# 2. pre-process
__snake_case = jnp.transpose(lowercase_ , (0, 2, 3, 1))
__snake_case = self.conv_in(lowercase_)
__snake_case = jnp.transpose(lowercase_ , (0, 2, 3, 1))
__snake_case = self.controlnet_cond_embedding(lowercase_)
sample += controlnet_cond
# 3. down
__snake_case = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase_ , lowercase_):
__snake_case , __snake_case = down_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train)
else:
__snake_case , __snake_case = down_block(lowercase_ , lowercase_ , deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
__snake_case = self.mid_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train)
# 5. contronet blocks
__snake_case = ()
for down_block_res_sample, controlnet_block in zip(lowercase_ , self.controlnet_down_blocks):
__snake_case = controlnet_block(lowercase_)
controlnet_down_block_res_samples += (down_block_res_sample,)
__snake_case = controlnet_down_block_res_samples
__snake_case = self.controlnet_mid_block(lowercase_)
# 6. scaling
__snake_case = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase_ , mid_block_res_sample=lowercase_)
| 313 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __lowercase :
def __init__( self , lowercase_ = "cpu" , lowercase_ = "openai/clip-vit-large-patch14") -> None:
__snake_case = device
__snake_case = CLIPTokenizerFast.from_pretrained(lowercase_)
__snake_case = [0.4814_5466, 0.457_8275, 0.4082_1073]
__snake_case = [0.2686_2954, 0.2613_0258, 0.2757_7711]
__snake_case = torchvision.transforms.Normalize(self.image_mean , self.image_std)
__snake_case = torchvision.transforms.Resize(2_2_4)
__snake_case = torchvision.transforms.CenterCrop(2_2_4)
def _a ( self , lowercase_) -> int:
__snake_case = self.resize(lowercase_)
__snake_case = self.center_crop(lowercase_)
__snake_case = self.normalize(lowercase_)
return images
def __call__( self , lowercase_=None , lowercase_=None , **lowercase_) -> Union[str, Any]:
__snake_case = self.tokenizer(text=lowercase_ , **lowercase_)
__snake_case = self.preprocess_img(lowercase_)
__snake_case = {key: value.to(self.device) for (key, value) in encoding.items()}
return encoding
class __lowercase ( nn.Module ):
def __init__( self , lowercase_=1_0 , lowercase_=0.01 , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False , lowercase_=True , lowercase_="image" , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=False , ) -> None:
super().__init__()
__snake_case = None
__snake_case = device if device else get_device()
if vqgan:
__snake_case = vqgan
else:
__snake_case = load_vqgan(self.device , conf_path=lowercase_ , ckpt_path=lowercase_)
self.vqgan.eval()
if clip:
__snake_case = clip
else:
__snake_case = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
self.clip.to(self.device)
__snake_case = ProcessorGradientFlow(device=self.device)
__snake_case = iterations
__snake_case = lr
__snake_case = log
__snake_case = make_grid
__snake_case = return_val
__snake_case = quantize
__snake_case = self.vqgan.decoder.z_shape
def _a ( self , lowercase_=None , lowercase_=None , lowercase_=5 , lowercase_=True) -> List[str]:
__snake_case = []
if output_path is None:
__snake_case = './animation.gif'
if input_path is None:
__snake_case = self.save_path
__snake_case = sorted(glob(input_path + '/*'))
if not len(lowercase_):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)')
if len(lowercase_) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)')
__snake_case = total_duration / len(lowercase_)
__snake_case = [frame_duration] * len(lowercase_)
if extend_frames:
__snake_case = 1.5
__snake_case = 3
for file_name in paths:
if file_name.endswith('.png'):
images.append(imageio.imread(lowercase_))
imageio.mimsave(lowercase_ , lowercase_ , duration=lowercase_)
print(F"gif saved to {output_path}")
def _a ( self , lowercase_=None , lowercase_=None) -> Union[str, Any]:
if not (path or img):
raise ValueError('Input either path or tensor')
if img is not None:
raise NotImplementedError
__snake_case = preprocess(Image.open(lowercase_) , target_image_size=2_5_6).to(self.device)
__snake_case = preprocess_vqgan(lowercase_)
__snake_case , *__snake_case = self.vqgan.encode(lowercase_)
return z
def _a ( self , lowercase_) -> Dict:
__snake_case = self.latent.detach().requires_grad_()
__snake_case = base_latent + transform_vector
if self.quantize:
__snake_case , *__snake_case = self.vqgan.quantize(lowercase_)
else:
__snake_case = trans_latent
return self.vqgan.decode(lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_=None) -> Any:
__snake_case = self.clip_preprocessor(text=lowercase_ , images=lowercase_ , return_tensors='pt' , padding=lowercase_)
__snake_case = self.clip(**lowercase_)
__snake_case = clip_outputs.logits_per_image
if weights is not None:
__snake_case = similarity_logits * weights
return similarity_logits.sum()
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
__snake_case = self._get_clip_similarity(pos_prompts['prompts'] , lowercase_ , weights=(1 / pos_prompts['weights']))
if neg_prompts:
__snake_case = self._get_clip_similarity(neg_prompts['prompts'] , lowercase_ , weights=neg_prompts['weights'])
else:
__snake_case = torch.tensor([1] , device=self.device)
__snake_case = -torch.log(lowercase_) + torch.log(lowercase_)
return loss
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Any:
__snake_case = torch.randn_like(self.latent , requires_grad=lowercase_ , device=self.device)
__snake_case = torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
__snake_case = self._add_vector(lowercase_)
__snake_case = loop_post_process(lowercase_)
__snake_case = self._get_CLIP_loss(lowercase_ , lowercase_ , lowercase_)
print('CLIP loss' , lowercase_)
if self.log:
wandb.log({'CLIP Loss': clip_loss})
clip_loss.backward(retain_graph=lowercase_)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Any:
wandb.init(reinit=lowercase_ , project='face-editor')
wandb.config.update({'Positive Prompts': positive_prompts})
wandb.config.update({'Negative Prompts': negative_prompts})
wandb.config.update({'lr': self.lr, 'iterations': self.iterations})
if image_path:
__snake_case = Image.open(lowercase_)
__snake_case = image.resize((2_5_6, 2_5_6))
wandb.log('Original Image' , wandb.Image(lowercase_))
def _a ( self , lowercase_) -> Optional[int]:
if not prompts:
return []
__snake_case = []
__snake_case = []
if isinstance(lowercase_ , lowercase_):
__snake_case = [prompt.strip() for prompt in prompts.split('|')]
for prompt in prompts:
if isinstance(lowercase_ , (tuple, list)):
__snake_case = prompt[0]
__snake_case = float(prompt[1])
elif ":" in prompt:
__snake_case , __snake_case = prompt.split(':')
__snake_case = float(lowercase_)
else:
__snake_case = prompt
__snake_case = 1.0
processed_prompts.append(lowercase_)
weights.append(lowercase_)
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase_ , device=self.device),
}
def _a ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=None , ) -> List[str]:
if image_path:
__snake_case = self._get_latent(lowercase_)
else:
__snake_case = torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(lowercase_ , lowercase_ , lowercase_)
assert pos_prompts, "You must provide at least one positive prompt."
__snake_case = self.process_prompts(lowercase_)
__snake_case = self.process_prompts(lowercase_)
if save_final and save_path is None:
__snake_case = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts']))
if not os.path.exists(lowercase_):
os.makedirs(lowercase_)
else:
__snake_case = save_path + '_' + get_timestamp()
os.makedirs(lowercase_)
__snake_case = save_path
__snake_case = self.vqgan.decode(self.latent)[0]
if show_intermediate:
print('Original Image')
show_pil(custom_to_pil(lowercase_))
__snake_case = loop_post_process(lowercase_)
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase_ , lowercase_ , lowercase_)):
if show_intermediate:
show_pil(lowercase_)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png"))
if self.log:
wandb.log({'Image': wandb.Image(lowercase_)})
if show_final:
show_pil(lowercase_)
if save_final:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png"))
| 313 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
_UpperCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_UpperCAmelCase = np.concatenate(_lowerCAmelCase , axis=0 )
_UpperCAmelCase = np.array(_lowerCAmelCase ).astype(np.floataa ) / 255.0
_UpperCAmelCase = image.transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = 2.0 * image - 1.0
_UpperCAmelCase = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
return image
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0.9995 ) -> Optional[int]:
if not isinstance(_lowerCAmelCase , np.ndarray ):
_UpperCAmelCase = True
_UpperCAmelCase = va.device
_UpperCAmelCase = va.cpu().numpy()
_UpperCAmelCase = va.cpu().numpy()
_UpperCAmelCase = np.sum(va * va / (np.linalg.norm(_lowerCAmelCase ) * np.linalg.norm(_lowerCAmelCase )) )
if np.abs(_lowerCAmelCase ) > DOT_THRESHOLD:
_UpperCAmelCase = (1 - t) * va + t * va
else:
_UpperCAmelCase = np.arccos(_lowerCAmelCase )
_UpperCAmelCase = np.sin(_lowerCAmelCase )
_UpperCAmelCase = theta_a * t
_UpperCAmelCase = np.sin(_lowerCAmelCase )
_UpperCAmelCase = np.sin(theta_a - theta_t ) / sin_theta_a
_UpperCAmelCase = sin_theta_t / sin_theta_a
_UpperCAmelCase = sa * va + sa * va
if inputs_are_torch:
_UpperCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
return va
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
_UpperCAmelCase = F.normalize(_lowerCAmelCase , dim=-1 )
_UpperCAmelCase = F.normalize(_lowerCAmelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
for param in model.parameters():
_UpperCAmelCase = value
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : Optional[Any] , __UpperCamelCase : AutoencoderKL , __UpperCamelCase : CLIPTextModel , __UpperCamelCase : CLIPModel , __UpperCamelCase : CLIPTokenizer , __UpperCamelCase : UNetaDConditionModel , __UpperCamelCase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __UpperCamelCase : CLIPFeatureExtractor , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Dict=None , __UpperCamelCase : Dict=None , ):
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , clip_model=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , coca_model=__UpperCamelCase , coca_tokenizer=__UpperCamelCase , coca_transform=__UpperCamelCase , )
_UpperCAmelCase = (
feature_extractor.size
if isinstance(feature_extractor.size , __UpperCamelCase )
else feature_extractor.size["shortest_edge"]
)
_UpperCAmelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __UpperCamelCase )
set_requires_grad(self.clip_model , __UpperCamelCase )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
self.enable_attention_slicing(__UpperCamelCase )
def UpperCAmelCase__ ( self : Any ):
set_requires_grad(self.vae , __UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
set_requires_grad(self.vae , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
set_requires_grad(self.unet , __UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
set_requires_grad(self.unet , __UpperCamelCase )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] ):
# get the original timestep using init_timestep
_UpperCAmelCase = min(int(num_inference_steps * strength ) , __UpperCamelCase )
_UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int]=None ):
if not isinstance(__UpperCamelCase , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(__UpperCamelCase )}''' )
_UpperCAmelCase = image.to(device=__UpperCamelCase , dtype=__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__UpperCamelCase )
]
_UpperCAmelCase = torch.cat(__UpperCamelCase , dim=0 )
else:
_UpperCAmelCase = self.vae.encode(__UpperCamelCase ).latent_dist.sample(__UpperCamelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_UpperCAmelCase = 0.18215 * init_latents
_UpperCAmelCase = init_latents.repeat_interleave(__UpperCamelCase , dim=0 )
_UpperCAmelCase = randn_tensor(init_latents.shape , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
# get latents
_UpperCAmelCase = self.scheduler.add_noise(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = init_latents
return latents
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Any ):
_UpperCAmelCase = self.coca_transform(__UpperCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_UpperCAmelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_UpperCAmelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Dict , __UpperCamelCase : str ):
_UpperCAmelCase = self.feature_extractor.preprocess(__UpperCamelCase )
_UpperCAmelCase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
_UpperCAmelCase = self.clip_model.get_image_features(__UpperCamelCase )
_UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__UpperCamelCase )
_UpperCAmelCase = image_embeddings_clip.repeat_interleave(__UpperCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , ):
_UpperCAmelCase = latents.detach().requires_grad_()
_UpperCAmelCase = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
_UpperCAmelCase = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_UpperCAmelCase = self.scheduler.alphas_cumprod[timestep]
_UpperCAmelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_UpperCAmelCase = torch.sqrt(__UpperCamelCase )
_UpperCAmelCase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __UpperCamelCase ):
_UpperCAmelCase = self.scheduler.sigmas[index]
_UpperCAmelCase = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_UpperCAmelCase = 1 / 0.18215 * sample
_UpperCAmelCase = self.vae.decode(__UpperCamelCase ).sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = transforms.Resize(self.feature_extractor_size )(__UpperCamelCase )
_UpperCAmelCase = self.normalize(__UpperCamelCase ).to(latents.dtype )
_UpperCAmelCase = self.clip_model.get_image_features(__UpperCamelCase )
_UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__UpperCamelCase )
_UpperCAmelCase = spherical_dist_loss(__UpperCamelCase , __UpperCamelCase ).mean() * clip_guidance_scale
_UpperCAmelCase = -torch.autograd.grad(__UpperCamelCase , __UpperCamelCase )[0]
if isinstance(self.scheduler , __UpperCamelCase ):
_UpperCAmelCase = latents.detach() + grads * (sigma**2)
_UpperCAmelCase = noise_pred_original
else:
_UpperCAmelCase = noise_pred_original - torch.sqrt(__UpperCamelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : str , __UpperCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , __UpperCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[int] = 512 , __UpperCamelCase : Optional[int] = 512 , __UpperCamelCase : float = 0.6 , __UpperCamelCase : Optional[int] = 50 , __UpperCamelCase : Optional[float] = 7.5 , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[float] = 100 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : float = 0.8 , __UpperCamelCase : float = 0.1 , __UpperCamelCase : float = 0.1 , ):
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(__UpperCamelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(__UpperCamelCase , torch.Generator ) and batch_size > 1:
_UpperCAmelCase = [generator] + [None] * (batch_size - 1)
_UpperCAmelCase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
_UpperCAmelCase = [x[0] for x in coca_is_none if x[1]]
_UpperCAmelCase = ", ".join(__UpperCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__UpperCamelCase ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_UpperCAmelCase = self.get_image_description(__UpperCamelCase )
if style_prompt is None:
if len(__UpperCamelCase ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_UpperCAmelCase = self.get_image_description(__UpperCamelCase )
# get prompt text embeddings for content and style
_UpperCAmelCase = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="pt" , )
_UpperCAmelCase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_UpperCAmelCase = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="pt" , )
_UpperCAmelCase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_UpperCAmelCase = slerp(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# duplicate text embeddings for each generation per prompt
_UpperCAmelCase = text_embeddings.repeat_interleave(__UpperCamelCase , dim=0 )
# set timesteps
_UpperCAmelCase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_offset:
_UpperCAmelCase = 1
self.scheduler.set_timesteps(__UpperCamelCase , **__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_UpperCAmelCase , _UpperCAmelCase = self.get_timesteps(__UpperCamelCase , __UpperCamelCase , self.device )
_UpperCAmelCase = timesteps[:1].repeat(__UpperCamelCase )
# Preprocess image
_UpperCAmelCase = preprocess(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text_embeddings.dtype , self.device , __UpperCamelCase )
_UpperCAmelCase = preprocess(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text_embeddings.dtype , self.device , __UpperCamelCase )
_UpperCAmelCase = slerp(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if clip_guidance_scale > 0:
_UpperCAmelCase = self.get_clip_image_embeddings(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.get_clip_image_embeddings(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = slerp(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCAmelCase = content_text_input.input_ids.shape[-1]
_UpperCAmelCase = self.tokenizer([""] , padding="max_length" , max_length=__UpperCamelCase , return_tensors="pt" )
_UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_UpperCAmelCase = uncond_embeddings.repeat_interleave(__UpperCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCAmelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_UpperCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_UpperCAmelCase = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
_UpperCAmelCase = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_UpperCAmelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
# check if the scheduler accepts generator
_UpperCAmelCase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_UpperCAmelCase = generator
with self.progress_bar(total=__UpperCamelCase ):
for i, t in enumerate(__UpperCamelCase ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
_UpperCAmelCase = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 )
_UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_UpperCAmelCase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_UpperCAmelCase , _UpperCAmelCase = self.cond_fn(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_UpperCAmelCase = 1 / 0.18215 * latents
_UpperCAmelCase = self.vae.decode(__UpperCamelCase ).sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
| 129 |
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : List[str] = ["""torch""", """torchsde"""]
def __init__( self : Optional[Any] , *__UpperCamelCase : int , **__UpperCamelCase : List[Any] ):
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : List[str] ):
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] ):
requires_backends(cls , ["torch", "torchsde"] )
| 129 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : Union[str, Any] = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCamelCase( lowerCAmelCase_ ):
snake_case_ : int = 'wavlm'
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : int=3_2 , SCREAMING_SNAKE_CASE : str=7_6_8 , SCREAMING_SNAKE_CASE : List[Any]=1_2 , SCREAMING_SNAKE_CASE : Any=1_2 , SCREAMING_SNAKE_CASE : Any=3_0_7_2 , SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : str=1e-5 , SCREAMING_SNAKE_CASE : List[Any]="group" , SCREAMING_SNAKE_CASE : Optional[int]="gelu" , SCREAMING_SNAKE_CASE : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE : str=(1_0, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : str=1_2_8 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE : Optional[Any]=3_2_0 , SCREAMING_SNAKE_CASE : List[Any]=8_0_0 , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=0.05 , SCREAMING_SNAKE_CASE : Any=1_0 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : List[Any]=1_0 , SCREAMING_SNAKE_CASE : int=3_2_0 , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Tuple=1_0_0 , SCREAMING_SNAKE_CASE : Tuple=2_5_6 , SCREAMING_SNAKE_CASE : int=2_5_6 , SCREAMING_SNAKE_CASE : Dict=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]="mean" , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : List[str]=2_5_6 , SCREAMING_SNAKE_CASE : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , SCREAMING_SNAKE_CASE : List[str]=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE : Any=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE : List[str]=8_0 , SCREAMING_SNAKE_CASE : List[str]=0 , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=2 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[Any]=None , **SCREAMING_SNAKE_CASE : str , ) -> List[Any]:
'''simple docstring'''
super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
__snake_case = hidden_size
__snake_case = feat_extract_norm
__snake_case = feat_extract_activation
__snake_case = list(A__ )
__snake_case = list(A__ )
__snake_case = list(A__ )
__snake_case = conv_bias
__snake_case = num_buckets
__snake_case = max_bucket_distance
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layerdrop
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = num_ctc_classes
__snake_case = vocab_size
__snake_case = do_stable_layer_norm
__snake_case = use_weighted_layer_sum
__snake_case = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = apply_spec_augment
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
# parameters for pretraining with codevector quantized representations
__snake_case = num_codevectors_per_group
__snake_case = num_codevector_groups
__snake_case = contrastive_logits_temperature
__snake_case = num_negatives
__snake_case = codevector_dim
__snake_case = proj_codevector_dim
__snake_case = diversity_loss_weight
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# adapter
__snake_case = add_adapter
__snake_case = adapter_kernel_size
__snake_case = adapter_stride
__snake_case = num_adapter_layers
__snake_case = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case = list(A__ )
__snake_case = list(A__ )
__snake_case = list(A__ )
__snake_case = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 371 |
'''simple docstring'''
from torch import nn
def __lowercase (_lowercase ) -> Union[str, Any]:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"Unsupported activation function: {act_fn}" )
| 150 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowercase :
def __init__( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=1_3 , __lowerCamelCase : Optional[Any]=3_0 , __lowerCamelCase : Any=2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : str=True , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Union[str, Any]=3_7 , __lowerCamelCase : str="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=1_0 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=2 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 2
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = TFDeiTModel(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = TFDeiTForMaskedImageModeling(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFDeiTForMaskedImageModeling(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFDeiTForImageClassification(__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFDeiTForImageClassification(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
UpperCamelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowercase ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase = TFDeiTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Dense ) )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowercase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _lowercase ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=False ) -> int:
"""simple docstring"""
UpperCAmelCase = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFDeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) ->Tuple:
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__lowerCamelCase , return_tensors="""tf""" )
# forward pass
UpperCAmelCase = model(**__lowerCamelCase )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCAmelCase = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 708 |
import math
import qiskit
def _UpperCamelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 ) ->qiskit.result.counts.Counts:
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
UpperCAmelCase = qiskit.QuantumRegister(4 , """qr""" )
UpperCAmelCase = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
UpperCAmelCase = [input_a, input_a, carry_in]
UpperCAmelCase = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits
UpperCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" )
UpperCAmelCase = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_0_0_0 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 627 | 0 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = TypeVar('''DatasetType''', Dataset, IterableDataset)
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = "first_exhausted" , ) ->DatasetType:
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ , (Dataset, IterableDataset) ):
if isinstance(UpperCAmelCase_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'is an empty dataset dictionary.' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(UpperCAmelCase_ )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCAmelCase_ ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCAmelCase_ ).__name__}.''' )
if i == 0:
snake_case__ , snake_case__ = (
(Dataset, IterableDataset) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , info=UpperCAmelCase_ , split=UpperCAmelCase_ , stopping_strategy=UpperCAmelCase_ )
else:
return _interleave_iterable_datasets(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , info=UpperCAmelCase_ , split=UpperCAmelCase_ , stopping_strategy=UpperCAmelCase_ )
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = 0 , ) ->DatasetType:
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ , (Dataset, IterableDataset) ):
if isinstance(UpperCAmelCase_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'is an empty dataset dictionary.' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(UpperCAmelCase_ )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCAmelCase_ ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCAmelCase_ ).__name__}.''' )
if i == 0:
snake_case__ , snake_case__ = (
(Dataset, IterableDataset) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(UpperCAmelCase_ , info=UpperCAmelCase_ , split=UpperCAmelCase_ , axis=UpperCAmelCase_ )
else:
return _concatenate_iterable_datasets(UpperCAmelCase_ , info=UpperCAmelCase_ , split=UpperCAmelCase_ , axis=UpperCAmelCase_ )
| 368 |
'''simple docstring'''
a__ : Optional[Any] = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def __lowerCamelCase ( UpperCAmelCase_ ) ->int:
snake_case__ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
snake_case__ = Stack()
snake_case__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(UpperCAmelCase_ ) )
elif i in operators:
# RULE 2
operator_stack.push(UpperCAmelCase_ )
elif i == ")":
# RULE 4
snake_case__ = operator_stack.peek()
operator_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operators[opr](UpperCAmelCase_ , UpperCAmelCase_ )
operand_stack.push(UpperCAmelCase_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
a__ : Any = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 368 | 1 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase : int = int(__lowerCAmelCase )
assert noofclusters < len(__lowerCAmelCase )
# Find out the dimensionality
_lowerCAmelCase : int = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : List[Any] = list(range(len(__lowerCAmelCase ) ) )
shuffle(__lowerCAmelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : Any = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : Optional[int] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(__lowerCAmelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Dict = []
for centroid in centroids:
cent_assigns.append(tf.assign(__lowerCAmelCase ,__lowerCAmelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Optional[Any] = [tf.Variable(0 ) for i in range(len(__lowerCAmelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : Dict = tf.placeholder("""int32""" )
_lowerCAmelCase : List[str] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(__lowerCAmelCase ,__lowerCAmelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : List[Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : List[Any] = tf.reduce_mean(__lowerCAmelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Tuple = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Optional[int] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__lowerCAmelCase ,__lowerCAmelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Optional[Any] = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : Any = tf.argmin(__lowerCAmelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Any = tf.initialize_all_variables()
# Initialize all variables
sess.run(__lowerCAmelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : Union[str, Any] = 100
for _ in range(__lowerCAmelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(__lowerCAmelCase ) ):
_lowerCAmelCase : Dict = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : int = [
sess.run(__lowerCAmelCase ,feed_dict={va: vect, va: sess.run(__lowerCAmelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Optional[Any] = sess.run(
__lowerCAmelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(__lowerCAmelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : Any = [
vectors[i]
for i in range(len(__lowerCAmelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
__lowerCAmelCase ,feed_dict={mean_input: array(__lowerCAmelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Dict = sess.run(__lowerCAmelCase )
_lowerCAmelCase : List[str] = sess.run(__lowerCAmelCase )
return centroids, assignments
| 712 | """simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : List[str] , __snake_case : List[Any] , __snake_case : int=13 , __snake_case : Tuple=7 , __snake_case : Union[str, Any]=True , __snake_case : List[str]=True , __snake_case : List[str]=True , __snake_case : Optional[int]=True , __snake_case : Optional[Any]=99 , __snake_case : str=32 , __snake_case : Dict=5 , __snake_case : List[str]=4 , __snake_case : Dict=37 , __snake_case : Dict="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : Tuple=0.1 , __snake_case : Tuple=5_12 , __snake_case : Optional[Any]=16 , __snake_case : List[str]=2 , __snake_case : Optional[Any]=0.02 , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : Optional[int]="None" , __snake_case : Optional[Any]=3 , __snake_case : Dict=4 , __snake_case : str=None , )-> List[Any]:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = relative_attention
snake_case = position_biased_input
snake_case = pos_att_type
snake_case = scope
def lowerCAmelCase ( self : Dict )-> Tuple:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict )-> Tuple:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowerCAmelCase ( self : Dict )-> List[Any]:
snake_case = self.get_config()
snake_case = 3_00
return config
def lowerCAmelCase ( self : int , __snake_case : Optional[Any] )-> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowerCAmelCase ( self : List[str] , __snake_case : Dict , __snake_case : Dict , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[Any] )-> Union[str, Any]:
snake_case = DebertaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )[0]
snake_case = model(__snake_case , token_type_ids=__snake_case )[0]
snake_case = model(__snake_case )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowerCAmelCase ( self : List[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Tuple , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Dict )-> str:
snake_case = DebertaForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : List[str] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : int , __snake_case : str , __snake_case : List[Any] )-> Union[str, Any]:
snake_case = self.num_labels
snake_case = DebertaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__snake_case )
def lowerCAmelCase ( self : Tuple , __snake_case : Dict , __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Any , __snake_case : Tuple )-> Union[str, Any]:
snake_case = self.num_labels
snake_case = DebertaForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __snake_case : int , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] )-> Any:
snake_case = DebertaForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : List[str] )-> Dict:
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) ,
) = config_and_inputs
snake_case = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowerCAmelCase ( self : int )-> str:
snake_case = DebertaModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def lowerCAmelCase ( self : str )-> List[str]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int )-> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__snake_case )
def lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__snake_case )
def lowerCAmelCase ( self : Optional[int] )-> Any:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__snake_case )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__snake_case )
@slow
def lowerCAmelCase ( self : Optional[int] )-> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = DebertaModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def lowerCAmelCase ( self : Any )-> Tuple:
pass
@slow
def lowerCAmelCase ( self : int )-> Any:
snake_case = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
snake_case = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case = model(__snake_case , attention_mask=__snake_case )[0]
# compare the actual values for a slice.
snake_case = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1e-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 369 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> float:
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , ) -> float:
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
__lowerCAmelCase , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCamelCase : Optional[int] = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : int = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 216 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = KandinskyImgaImgPipeline
a_ = ["prompt", "image_embeds", "negative_image_embeds", "image"]
a_ = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
a_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a_ = False
@property
def A ( self : Optional[Any] ) -> Tuple:
return 32
@property
def A ( self : Tuple ) -> Tuple:
return 32
@property
def A ( self : str ) -> List[str]:
return self.time_input_dim
@property
def A ( self : List[str] ) -> int:
return self.time_input_dim * 4
@property
def A ( self : int ) -> str:
return 1_00
@property
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def A ( self : Optional[Any] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
UpperCAmelCase_ : str = MultilingualCLIP(_A )
UpperCAmelCase_ : Tuple = text_encoder.eval()
return text_encoder
@property
def A ( self : int ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase_ : Tuple = UNetaDConditionModel(**_A )
return model
@property
def A ( self : List[str] ) -> Tuple:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self : str ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : Any ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : Optional[int] = self.dummy_tokenizer
UpperCAmelCase_ : Optional[int] = self.dummy_unet
UpperCAmelCase_ : Optional[Any] = self.dummy_movq
UpperCAmelCase_ : Optional[int] = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
UpperCAmelCase_ : Tuple = DDIMScheduler(**_A )
UpperCAmelCase_ : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A ( self : str , _A : Optional[int] , _A : Union[str, Any]=0 ) -> str:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
UpperCAmelCase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[Any] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Union[str, Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def A ( self : Dict ) -> int:
UpperCAmelCase_ : str = '''cpu'''
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
UpperCAmelCase_ : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : List[str] = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : List[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase):
def A ( self : Tuple ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
UpperCAmelCase_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase_ : Tuple = '''A red cartoon frog, 4k'''
UpperCAmelCase_ : Dict = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
UpperCAmelCase_ : Any = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
UpperCAmelCase_ : Optional[int] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ : str = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase_ : Any = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
UpperCAmelCase_ : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_A , _A )
| 216 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase :List[str] = logging.get_logger(__name__)
lowerCAmelCase :Dict = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = """vit_msn"""
def __init__( self : Union[str, Any] , _A : Optional[int]=768 , _A : List[str]=12 , _A : Optional[Any]=12 , _A : Tuple=3072 , _A : Optional[Any]="gelu" , _A : Any=0.0 , _A : List[str]=0.0 , _A : str=0.02 , _A : str=1E-06 , _A : Tuple=224 , _A : Any=16 , _A : int=3 , _A : List[str]=True , **_A : Union[str, Any] , ) -> Union[str, Any]:
super().__init__(**lowerCamelCase__ )
__magic_name__ : str = hidden_size
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : List[str] = num_attention_heads
__magic_name__ : str = intermediate_size
__magic_name__ : Optional[int] = hidden_act
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : str = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = initializer_range
__magic_name__ : List[Any] = layer_norm_eps
__magic_name__ : Any = image_size
__magic_name__ : Tuple = patch_size
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : Tuple = qkv_bias | 561 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions | 45 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure) | 240 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class snake_case_ ( __lowercase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A_ = field(default='summarization' ,metadata={'include_in_asdict_even_if_is_default': True} )
A_ = Features({'text': Value('string' )} )
A_ = Features({'summary': Value('string' )} )
A_ = "text"
A_ = "summary"
@property
def UpperCAmelCase__ ( self : int )->Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"} | 240 | 1 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
a = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
a = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[Any] ):
return FSMTTokenizer.from_pretrained(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : int ):
_A = FSMTForConditionalGeneration.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
_A = F'''facebook/wmt19-{pair}'''
_A = self.get_tokenizer(_UpperCAmelCase )
_A = self.get_model(_UpperCAmelCase )
_A = bleu_data[pair]['src']
_A = bleu_data[pair]['tgt']
_A = tokenizer(_UpperCAmelCase , return_tensors='pt' , truncation=_UpperCAmelCase , padding='longest' ).to(_UpperCAmelCase )
_A = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
_A = tokenizer.batch_decode(
_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
_A = calculate_bleu(_UpperCAmelCase , _UpperCAmelCase )
print(_UpperCAmelCase )
self.assertGreaterEqual(scores['bleu'] , _UpperCAmelCase )
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__magic_name__ : Optional[int] = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[int] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__magic_name__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
"""simple docstring"""
import requests
UpperCAmelCase : str = "YOUR API KEY"
def __a ( _lowercase , _lowercase = giphy_api_key ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = '''+'''.join(query.split() )
lowerCamelCase__ : Optional[int] = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
lowerCamelCase__ : Dict = requests.get(_lowercase ).json()['''data''']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 121 | """simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __SCREAMING_SNAKE_CASE :
UpperCAmelCase = 42
UpperCAmelCase = None
UpperCAmelCase = None
def __a ( _lowercase ):
"""simple docstring"""
def is_valid_tree(_lowercase ) -> bool:
if node is None:
return True
if not isinstance(_lowercase , _lowercase ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_lowercase ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
_lowercase , _lowercase , _lowercase ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _lowercase , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _lowercase )
)
return is_binary_search_tree_recursive_check(_lowercase , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = "▁"
_UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
_UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
_UpperCAmelCase = {
"facebook/xglm-564M": 2_048,
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Tuple="</s>" , UpperCamelCase__ : Optional[Any]="<s>" , UpperCamelCase__ : int="<unk>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Tuple , ):
A = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
A = 7
A = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
A = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
A = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A = 1
# Mimic fairseq token-to-id alignment for the first 4 token
A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
A = len(self.sp_model )
A = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCamelCase__ )
A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[Any] ):
A = self.__dict__.copy()
A = None
A = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , UpperCamelCase__ : Any ):
A = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
A = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ ))
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ ))
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCamelCase ( self : str ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCamelCase ( self : int ):
A = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : str ):
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A = self.sp_model.PieceToId(UpperCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self : Dict , UpperCamelCase__ : str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str ):
A = ''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ' ' ).strip()
return out_string
def UpperCamelCase ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , 'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 699 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any], lowerCAmelCase : int ) -> Dict:
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : List[Any]=True ) -> Union[str, Any]:
if config_path is not None:
A = UniSpeechSatConfig.from_pretrained(lowerCAmelCase )
else:
A = UniSpeechSatConfig()
A = ''
if is_finetuned:
A = UniSpeechSatForCTC(lowerCAmelCase )
else:
A = UniSpeechSatForPreTraining(lowerCAmelCase )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase, lowerCAmelCase )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 699 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase : int ) ->Tuple:
"""simple docstring"""
if length <= 0 or not isinstance(lowercase , lowercase ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 717 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __A ( a ):
"""simple docstring"""
A_ = ''
A_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
A_ = None # compression type in fsspec. ex: "gzip"
A_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , _lowerCamelCase = "" , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase )-> List[str]:
super().__init__(self , **_lowerCamelCase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowercase__ = fsspec.open(
_lowerCamelCase , mode='''rb''' , protocol=_lowerCamelCase , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowercase__ = os.path.basename(self.file.path.split('''::''' )[0] )
lowercase__ = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowercase__ = None
@classmethod
def snake_case_( cls , _lowerCamelCase )-> List[Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_lowerCamelCase ).lstrip('''/''' )
def snake_case_( self )-> List[str]:
if self.dir_cache is None:
lowercase__ = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowercase__ = {f['''name''']: f}
def snake_case_( self , _lowerCamelCase )-> List[str]:
return self.file.open().read()
def snake_case_( self , _lowerCamelCase , _lowerCamelCase = "rb" , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , )-> Tuple:
lowercase__ = self._strip_protocol(_lowerCamelCase )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class __A ( a ):
"""simple docstring"""
A_ = 'bz2'
A_ = 'bz2'
A_ = '.bz2'
class __A ( a ):
"""simple docstring"""
A_ = 'gzip'
A_ = 'gzip'
A_ = '.gz'
class __A ( a ):
"""simple docstring"""
A_ = 'lz4'
A_ = 'lz4'
A_ = '.lz4'
class __A ( a ):
"""simple docstring"""
A_ = 'xz'
A_ = 'xz'
A_ = '.xz'
class __A ( a ):
"""simple docstring"""
A_ = 'zstd'
A_ = 'zstd'
A_ = '.zst'
def __init__( self , _lowerCamelCase , _lowerCamelCase = "rb" , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = DEFAULT_BLOCK_SIZE , **_lowerCamelCase , )-> Tuple:
super().__init__(
fo=_lowerCamelCase , mode=_lowerCamelCase , target_protocol=_lowerCamelCase , target_options=_lowerCamelCase , block_size=_lowerCamelCase , **_lowerCamelCase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowercase__ = self.file.__enter__
class __A :
"""simple docstring"""
def __init__( self , _lowerCamelCase )-> Union[str, Any]:
lowercase__ = file_
def __enter__( self )-> int:
self._file.__enter__()
return self
def __exit__( self , *_lowerCamelCase , **_lowerCamelCase )-> List[str]:
self._file.__exit__(*_lowerCamelCase , **_lowerCamelCase )
def __iter__( self )-> int:
return iter(self._file )
def snake_case_( self )-> List[Any]:
return next(self._file )
def __getattr__( self , _lowerCamelCase )-> Any:
return getattr(self._file , _lowerCamelCase )
def fixed_enter(*_lowerCamelCase , **_lowerCamelCase ):
return WrappedFile(_enter(*_lowerCamelCase , **_lowerCamelCase ) )
lowercase__ = fixed_enter
| 318 | 0 |
"""simple docstring"""
import math
def a ( __UpperCAmelCase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( __UpperCAmelCase : float = 0.1 ) -> int:
__magic_name__: Tuple = 3
__magic_name__: Optional[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__UpperCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = False
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "kinetics" in model_name:
__SCREAMING_SNAKE_CASE = 400
__SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__SCREAMING_SNAKE_CASE = 174
__SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
__SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
"""simple docstring"""
if "small" in model_name:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 192
__SCREAMING_SNAKE_CASE = 768
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 2048
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 1280
__SCREAMING_SNAKE_CASE = 5120
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 640
__SCREAMING_SNAKE_CASE = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
if "encoder." in name:
__SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
return name
def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
if key.startswith("encoder." ):
__SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split("." )
if key.startswith("decoder.blocks" ):
__SCREAMING_SNAKE_CASE = config.decoder_hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[2] )
__SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = config.hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = np.load(__snake_case )
return list(__snake_case )
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
else:
__SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
__SCREAMING_SNAKE_CASE = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
if "model" in files:
__SCREAMING_SNAKE_CASE = files["model"]
else:
__SCREAMING_SNAKE_CASE = files["module"]
__SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
__SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case )
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 693 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
UpperCamelCase_ = Mapping[str, np.ndarray]
UpperCamelCase_ = Mapping[str, Any] # Is a nested dict.
UpperCamelCase_ = 0.01
@dataclasses.dataclass(frozen=_snake_case )
class a_ :
UpperCamelCase__ : Optional[int] =42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCamelCase__ : int =42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCamelCase__ : str =42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCamelCase__ : Union[str, Any] =42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCamelCase__ : Tuple =42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCamelCase__ : Dict =None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCamelCase__ : Any =None
# Templates used to generate this protein (prediction-only)
UpperCamelCase__ : Optional[int] =None
# Chain corresponding to each parent
UpperCamelCase__ : int =None
def A ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = r'''(\[[A-Z]+\]\n)'''
UpperCAmelCase_ = [tag.strip() for tag in re.split(__A , __A ) if len(__A ) > 0]
UpperCAmelCase_ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
UpperCAmelCase_ = ['''N''', '''CA''', '''C''']
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
for g in groups:
if "[PRIMARY]" == g[0]:
UpperCAmelCase_ = g[1][0].strip()
for i in range(len(__A ) ):
if seq[i] not in residue_constants.restypes:
UpperCAmelCase_ = '''X''' # FIXME: strings are immutable
UpperCAmelCase_ = np.array(
[residue_constants.restype_order.get(__A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
UpperCAmelCase_ = []
for axis in range(3 ):
tertiary.append(list(map(__A , g[1][axis].split() ) ) )
UpperCAmelCase_ = np.array(__A )
UpperCAmelCase_ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__A ):
UpperCAmelCase_ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
UpperCAmelCase_ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
UpperCAmelCase_ = np.zeros(
(
len(__A ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__A ):
UpperCAmelCase_ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__A , atom_mask=__A , aatype=__A , residue_index=np.arange(len(__A ) ) , b_factors=__A , )
def A ( __UpperCAmelCase , __UpperCAmelCase = 0 ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = prot.remark
if remark is not None:
pdb_headers.append(f"REMARK {remark}" )
UpperCAmelCase_ = prot.parents
UpperCAmelCase_ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
UpperCAmelCase_ = [p for i, p in zip(__A , __A ) if i == chain_id]
if parents is None or len(__A ) == 0:
UpperCAmelCase_ = ['''N/A''']
pdb_headers.append(f"PARENT {' '.join(__A )}" )
return pdb_headers
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = pdb_str.split('''\n''' )
UpperCAmelCase_ = prot.remark
if remark is not None:
out_pdb_lines.append(f"REMARK {remark}" )
UpperCAmelCase_ = 42
if prot.parents is not None and len(prot.parents ) > 0:
UpperCAmelCase_ = []
if prot.parents_chain_index is not None:
UpperCAmelCase_ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__A ) , [] )
parent_dict[str(__A )].append(__A )
UpperCAmelCase_ = max([int(__A ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
UpperCAmelCase_ = parent_dict.get(str(__A ) , ['''N/A'''] )
parents_per_chain.append(__A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
UpperCAmelCase_ = [['''N/A''']]
def make_parent_line(__UpperCAmelCase ) -> str:
return f"PARENT {' '.join(__A )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
UpperCAmelCase_ = 0
for i, l in enumerate(__A ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__A ):
UpperCAmelCase_ = parents_per_chain[chain_counter]
else:
UpperCAmelCase_ = ['''N/A''']
out_pdb_lines.append(make_parent_line(__A ) )
return "\n".join(__A )
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = residue_constants.restypes + ['''X''']
def res_atoa(__UpperCAmelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
UpperCAmelCase_ = residue_constants.atom_types
UpperCAmelCase_ = []
UpperCAmelCase_ = prot.atom_mask
UpperCAmelCase_ = prot.aatype
UpperCAmelCase_ = prot.atom_positions
UpperCAmelCase_ = prot.residue_index.astype(np.intaa )
UpperCAmelCase_ = prot.b_factors
UpperCAmelCase_ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
UpperCAmelCase_ = get_pdb_headers(__A )
if len(__A ) > 0:
pdb_lines.extend(__A )
UpperCAmelCase_ = aatype.shape[0]
UpperCAmelCase_ = 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = string.ascii_uppercase
UpperCAmelCase_ = None
# Add all atom sites.
for i in range(__A ):
UpperCAmelCase_ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
UpperCAmelCase_ = '''ATOM'''
UpperCAmelCase_ = atom_name if len(__A ) == 4 else f" {atom_name}"
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = 1.00
UpperCAmelCase_ = atom_name[0] # Protein supports only C, N, O, S, this works.
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = '''A'''
if chain_index is not None:
UpperCAmelCase_ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
UpperCAmelCase_ = (
f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
f"{res_name_a:>3} {chain_tag:>1}"
f"{residue_index[i]:>4}{insertion_code:>1} "
f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
f"{occupancy:>6.2f}{b_factor:>6.2f} "
f"{element:>2}{charge:>2}"
)
pdb_lines.append(__A )
atom_index += 1
UpperCAmelCase_ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
UpperCAmelCase_ = True
UpperCAmelCase_ = chain_index[i + 1]
if should_terminate:
# Close the chain.
UpperCAmelCase_ = '''TER'''
UpperCAmelCase_ = (
f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(__A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__A , __A ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(__A )
def A ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ) -> Any:
'''simple docstring'''
return Protein(
aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=__A , remark=__A , parents=__A , parents_chain_index=__A , )
| 701 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] ="bert"
def __init__( self :Any , _lowercase :Optional[int]=30522 , _lowercase :str=768 , _lowercase :Union[str, Any]=12 , _lowercase :Dict=12 , _lowercase :Optional[Any]=3072 , _lowercase :List[Any]="gelu" , _lowercase :Dict=0.1 , _lowercase :Union[str, Any]=0.1 , _lowercase :Optional[int]=512 , _lowercase :List[str]=2 , _lowercase :List[str]=0.02 , _lowercase :Union[str, Any]=1E-1_2 , _lowercase :Dict=0 , _lowercase :List[str]="absolute" , _lowercase :Union[str, Any]=True , _lowercase :str=None , **_lowercase :Union[str, Any] , ) -> Dict:
super().__init__(pad_token_id=_lowercase , **_lowercase)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class a_ ( _snake_case ):
@property
def __a ( self :str) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
| 561 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''informer'''
lowerCAmelCase_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Tuple , _A : Optional[int] = None , _A : Optional[int] = None , _A : str = "student_t" , _A : str = "nll" , _A : int = 1 , _A : List[int] = None , _A : Optional[Union[str, bool]] = "mean" , _A : int = 0 , _A : int = 0 , _A : int = 0 , _A : int = 0 , _A : Optional[List[int]] = None , _A : Optional[List[int]] = None , _A : int = 64 , _A : int = 32 , _A : int = 32 , _A : int = 2 , _A : int = 2 , _A : int = 2 , _A : int = 2 , _A : bool = True , _A : str = "gelu" , _A : float = 0.05 , _A : float = 0.1 , _A : float = 0.1 , _A : float = 0.1 , _A : float = 0.1 , _A : int = 100 , _A : float = 0.02 , _A : Union[str, Any]=True , _A : str = "prob" , _A : int = 5 , _A : bool = True , **_A : int , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = prediction_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = context_length or prediction_length
__SCREAMING_SNAKE_CASE : Optional[int] = distribution_output
__SCREAMING_SNAKE_CASE : Optional[int] = loss
__SCREAMING_SNAKE_CASE : Union[str, Any] = input_size
__SCREAMING_SNAKE_CASE : Any = num_time_features
__SCREAMING_SNAKE_CASE : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__SCREAMING_SNAKE_CASE : Tuple = scaling
__SCREAMING_SNAKE_CASE : List[Any] = num_dynamic_real_features
__SCREAMING_SNAKE_CASE : str = num_static_real_features
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_A ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__SCREAMING_SNAKE_CASE : List[Any] = cardinality
else:
__SCREAMING_SNAKE_CASE : int = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_A ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__SCREAMING_SNAKE_CASE : Any = embedding_dimension
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__SCREAMING_SNAKE_CASE : str = num_parallel_samples
# Transformer architecture configuration
__SCREAMING_SNAKE_CASE : List[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
__SCREAMING_SNAKE_CASE : Optional[int] = d_model
__SCREAMING_SNAKE_CASE : Any = encoder_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
__SCREAMING_SNAKE_CASE : List[str] = decoder_layers
__SCREAMING_SNAKE_CASE : List[str] = dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
__SCREAMING_SNAKE_CASE : Any = activation_dropout
__SCREAMING_SNAKE_CASE : int = encoder_layerdrop
__SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
__SCREAMING_SNAKE_CASE : Optional[Any] = activation_function
__SCREAMING_SNAKE_CASE : Optional[Any] = init_std
__SCREAMING_SNAKE_CASE : List[str] = use_cache
# Informer
__SCREAMING_SNAKE_CASE : Optional[int] = attention_type
__SCREAMING_SNAKE_CASE : int = sampling_factor
__SCREAMING_SNAKE_CASE : Optional[Any] = distil
super().__init__(is_encoder_decoder=_A , **_A )
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 74 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCamelCase (a_ ):
snake_case_ = ["""image_processor""", """tokenizer"""]
snake_case_ = """FlavaImageProcessor"""
snake_case_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase )-> Tuple:
__lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCamelCase , )
__lowerCAmelCase = kwargs.pop("feature_extractor" )
__lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = self.image_processor
def __call__( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> int:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__lowerCAmelCase = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
if images is not None:
__lowerCAmelCase = self.image_processor(
__UpperCamelCase , return_image_mask=__UpperCamelCase , return_codebook_pixels=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
if text is not None and images is not None:
encoding.update(__UpperCamelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def __UpperCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase )-> int:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[int]:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def __UpperCAmelCase ( self )-> Any:
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCAmelCase ( self )-> Dict:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCamelCase , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self )-> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCamelCase , )
return self.image_processor
| 367 | 0 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase = """"""
lowerCAmelCase = """"""
lowerCAmelCase = """"""
lowerCAmelCase = """"""
def __A ( a_ : str ):
# authorize twitter, initialize tweepy
lowerCAmelCase : Tuple = tweepy.OAuthHandler(a_ ,a_ )
auth.set_access_token(a_ ,a_ )
lowerCAmelCase : Optional[int] = tweepy.API(a_ )
# initialize a list to hold all the tweepy Tweets
lowerCAmelCase : int = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCAmelCase : List[Any] = api.user_timeline(screen_name=a_ ,count=2_0_0 )
# save most recent tweets
alltweets.extend(a_ )
# save the id of the oldest tweet less one
lowerCAmelCase : Dict = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(a_ ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
lowerCAmelCase : Any = api.user_timeline(
screen_name=a_ ,count=2_0_0 ,max_id=a_ )
# save most recent tweets
alltweets.extend(a_ )
# update the id of the oldest tweet less one
lowerCAmelCase : Dict = alltweets[-1].id - 1
print(f'''...{len(a_ )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCAmelCase : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"w" ) as f:
lowerCAmelCase : Dict = csv.writer(a_ )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(a_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 551 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCamelCase ( _A ):
snake_case_ = "microsoft/speecht5_tts"
snake_case_ = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
snake_case_ = "text_reader"
snake_case_ = SpeechTaProcessor
snake_case_ = SpeechTaForTextToSpeech
snake_case_ = SpeechTaHifiGan
snake_case_ = ["text"]
snake_case_ = ["audio"]
def _lowerCamelCase ( self ):
if self.post_processor is None:
lowerCAmelCase : str = "microsoft/speecht5_hifigan"
super().setup()
def _lowerCamelCase ( self , a_ , a_=None ):
lowerCAmelCase : str = self.pre_processor(text=a_ , return_tensors="pt" , truncation=a_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowerCAmelCase : Tuple = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
lowerCAmelCase : Union[str, Any] = torch.tensor(embeddings_dataset[7_305]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _lowerCamelCase ( self , a_ ):
with torch.no_grad():
return self.model.generate_speech(**a_ )
def _lowerCamelCase ( self , a_ ):
with torch.no_grad():
return self.post_processor(a_ ).cpu().detach()
| 551 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = StableUnCLIPPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[str] = 3_2
snake_case__ : List[Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
snake_case__ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
snake_case__ : Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=__SCREAMING_SNAKE_CASE , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
snake_case__ : int = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=__SCREAMING_SNAKE_CASE , num_layers=1 , )
torch.manual_seed(0 )
snake_case__ : Tuple = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_0_0_0 , clip_sample=__SCREAMING_SNAKE_CASE , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
snake_case__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=__SCREAMING_SNAKE_CASE )
snake_case__ : str = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
snake_case__ : Optional[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
snake_case__ : Dict = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__SCREAMING_SNAKE_CASE , layers_per_block=1 , upcast_attention=__SCREAMING_SNAKE_CASE , use_linear_projection=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
snake_case__ : List[Any] = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , )
torch.manual_seed(0 )
snake_case__ : Dict = AutoencoderKL()
snake_case__ : Optional[int] = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[str] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase ( self ):
snake_case__ : List[str] = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Any = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
snake_case__ : Optional[Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case__ : int = pipe("""anime turle""" , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" )
snake_case__ : Union[str, Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
snake_case__ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ : int = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
snake_case__ : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 38 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 38 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
snake_case__ : List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
snake_case__ : Dict = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Tuple = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
__SCREAMING_SNAKE_CASE : List[str] = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
__SCREAMING_SNAKE_CASE : Dict = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
__SCREAMING_SNAKE_CASE : str = text_generator("This is a test" , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
self.assertEqual(
a__ , [
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
] , )
__SCREAMING_SNAKE_CASE : Any = text_generator.model.config.eos_token_id
__SCREAMING_SNAKE_CASE : str = "<pad>"
__SCREAMING_SNAKE_CASE : Optional[int] = text_generator(
["This is a test", "This is a second test"] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
self.assertEqual(
a__ , [
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
] , )
@require_tf
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
__SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
__SCREAMING_SNAKE_CASE : Any = text_generator(["This is a test", "This is a second test"] , do_sample=a__ )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def a_ ( self , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = TextGenerationPipeline(model=a__ , tokenizer=a__ )
return text_generator, ["This is a test", "Another test"]
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = "Hello I believe in"
__SCREAMING_SNAKE_CASE : Tuple = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
__SCREAMING_SNAKE_CASE : int = text_generator(a__ )
self.assertEqual(
a__ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
__SCREAMING_SNAKE_CASE : List[Any] = text_generator(a__ , stop_sequence=" fe" )
self.assertEqual(a__ , [{"generated_text": "Hello I believe in fe"}] )
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : List[Any] = text_generator.model
__SCREAMING_SNAKE_CASE : Any = text_generator.tokenizer
__SCREAMING_SNAKE_CASE : Tuple = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
__SCREAMING_SNAKE_CASE : str = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(task="text-generation" , model=a__ , tokenizer=a__ , return_full_text=a__ )
__SCREAMING_SNAKE_CASE : int = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
__SCREAMING_SNAKE_CASE : List[str] = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
__SCREAMING_SNAKE_CASE : List[Any] = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__SCREAMING_SNAKE_CASE : Any = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
with self.assertRaises(a__ ):
__SCREAMING_SNAKE_CASE : Any = text_generator("test" , return_full_text=a__ , return_text=a__ )
with self.assertRaises(a__ ):
__SCREAMING_SNAKE_CASE : Dict = text_generator("test" , return_full_text=a__ , return_tensors=a__ )
with self.assertRaises(a__ ):
__SCREAMING_SNAKE_CASE : Tuple = text_generator("test" , return_text=a__ , return_tensors=a__ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__SCREAMING_SNAKE_CASE : Tuple = text_generator("" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__SCREAMING_SNAKE_CASE : str = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__SCREAMING_SNAKE_CASE : Union[str, Any] = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
__SCREAMING_SNAKE_CASE : List[str] = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(a__ ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a_ ( self ):
import torch
# Classic `model_kwargs`
__SCREAMING_SNAKE_CASE : Any = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__SCREAMING_SNAKE_CASE : str = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__SCREAMING_SNAKE_CASE : Dict = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__SCREAMING_SNAKE_CASE : List[str] = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__SCREAMING_SNAKE_CASE : List[str] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def a_ ( self ):
import torch
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def a_ ( self ):
import torch
__SCREAMING_SNAKE_CASE : Tuple = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=a__ , top_p=0.5 )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = "Hello world"
__SCREAMING_SNAKE_CASE : str = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger("transformers.generation.tf_utils" )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger("transformers.generation.utils" )
__SCREAMING_SNAKE_CASE : Optional[Any] = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a__ ) as cl:
__SCREAMING_SNAKE_CASE : str = text_generator(a__ , max_length=10 , max_new_tokens=1 )
self.assertIn(a__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(a__ ) as cl:
__SCREAMING_SNAKE_CASE : List[str] = text_generator(a__ , max_new_tokens=1 )
self.assertNotIn(a__ , cl.out )
with CaptureLogger(a__ ) as cl:
__SCREAMING_SNAKE_CASE : Dict = text_generator(a__ , max_length=10 )
self.assertNotIn(a__ , cl.out )
| 564 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , a__ , a__=13 , a__=7 , a__=6 , a__=17 , a__=23 , a__=11 , a__=True , ):
__SCREAMING_SNAKE_CASE : Dict = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : str = seq_length
__SCREAMING_SNAKE_CASE : str = act_dim
__SCREAMING_SNAKE_CASE : Optional[int] = state_dim
__SCREAMING_SNAKE_CASE : int = hidden_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = max_length
__SCREAMING_SNAKE_CASE : int = is_training
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__SCREAMING_SNAKE_CASE : int = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__SCREAMING_SNAKE_CASE : Dict = floats_tensor((self.batch_size, self.seq_length, 1) )
__SCREAMING_SNAKE_CASE : int = floats_tensor((self.batch_size, self.seq_length, 1) )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
__SCREAMING_SNAKE_CASE : Tuple = random_attention_mask((self.batch_size, self.seq_length) )
__SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def a_ ( self ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = DecisionTransformerModel(config=a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(a__ , a__ , a__ , a__ , a__ , a__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) : Any = config_and_inputs
__SCREAMING_SNAKE_CASE : int = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : List[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
snake_case__ : Optional[Any] = ()
snake_case__ : List[str] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
snake_case__ : List[str] = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
snake_case__ : Optional[Any] = False
snake_case__ : str = False
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : Union[str, Any] = False
snake_case__ : Any = False
snake_case__ : str = False
snake_case__ : Dict = False
snake_case__ : List[Any] = False
def a_ ( self ):
__SCREAMING_SNAKE_CASE : int = DecisionTransformerModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
@slow
def a_ ( self ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Any = DecisionTransformerModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(a__ )
__SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(a__ )] , a__ )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = 2 # number of steps of autoregressive prediction we will perform
__SCREAMING_SNAKE_CASE : str = 10 # defined by the RL environment, may be normalized
__SCREAMING_SNAKE_CASE : str = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
__SCREAMING_SNAKE_CASE : List[Any] = model.to(a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model.config
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Tuple = torch.randn(1 , 1 , config.state_dim ).to(device=a__ , dtype=torch.floataa ) # env.reset()
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=a__ )
__SCREAMING_SNAKE_CASE : Any = torch.tensor(a__ , device=a__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : Tuple = state
__SCREAMING_SNAKE_CASE : List[str] = torch.zeros(1 , 0 , config.act_dim , device=a__ , dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Tuple = torch.zeros(1 , 0 , device=a__ , dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(0 , device=a__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(a__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=a__ )] , dim=1 )
__SCREAMING_SNAKE_CASE : Dict = torch.cat([rewards, torch.zeros(1 , 1 , device=a__ )] , dim=1 )
__SCREAMING_SNAKE_CASE : List[Any] = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = model(
states=a__ , actions=a__ , rewards=a__ , returns_to_go=a__ , timesteps=a__ , attention_mask=a__ , return_dict=a__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=a__ , dtype=torch.floataa ),
1.0,
False,
{},
)
__SCREAMING_SNAKE_CASE : Tuple = action_pred[0, -1]
__SCREAMING_SNAKE_CASE : int = torch.cat([states, state] , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = returns_to_go[0, -1] - reward
__SCREAMING_SNAKE_CASE : Any = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = torch.cat(
[timesteps, torch.ones((1, 1) , device=a__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 564 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Optional[Any] = FlaxAutoencoderKL
@property
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = 4
__lowerCamelCase = 3
__lowerCamelCase = (32, 32)
__lowerCamelCase = jax.random.PRNGKey(0 )
__lowerCamelCase = jax.random.uniform(SCREAMING_SNAKE_CASE__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def __A ( self : str ) -> Tuple:
__lowerCamelCase = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
| 298 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __A ( self : Tuple ) -> Optional[int]:
__lowerCamelCase = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
__lowerCamelCase = {
'''input_ids''': tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state''']
__lowerCamelCase = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice.
__lowerCamelCase = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 298 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ :Dict = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
def lowercase_ ( self : List[str] ) ->Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Union[str, Any] = XLMProphetNetTokenizer(_snake_case, keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Dict ) ->Tuple:
snake_case__ : str = '[PAD]'
snake_case__ : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ), _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ), _snake_case )
def lowercase_ ( self : Any ) ->Any:
snake_case__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '[PAD]' )
self.assertEqual(vocab_keys[1], '[CLS]' )
self.assertEqual(vocab_keys[-1], 'j' )
self.assertEqual(len(_snake_case ), 1_0_1_2 )
def lowercase_ ( self : int ) ->List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_1_2 )
def lowercase_ ( self : Union[str, Any] ) ->List[Any]:
snake_case__ : str = XLMProphetNetTokenizer(_snake_case, keep_accents=_snake_case )
snake_case__ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(_snake_case, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )
snake_case__ : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_snake_case, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
snake_case__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
], )
snake_case__ : str = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
], )
@cached_property
def lowercase_ ( self : str ) ->Any:
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def lowercase_ ( self : Optional[Any] ) ->Optional[Any]:
snake_case__ : Optional[int] = 'Hello World!'
snake_case__ : Dict = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_snake_case, self.big_tokenizer.encode(_snake_case ) )
@slow
def lowercase_ ( self : List[str] ) ->Union[str, Any]:
# fmt: off
snake_case__ : List[Any] = {'input_ids': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case, model_name='microsoft/xprophetnet-large-wiki100-cased', revision='1acad1643ddd54a44df6a1b797ada8373685d90e', )
| 243 |
from __future__ import annotations
def lowercase_ (A : list[int] ):
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 243 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 514 |
from PIL import Image
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Image , SCREAMING_SNAKE_CASE_: int ) -> Image:
'''simple docstring'''
A__ = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
def contrast(SCREAMING_SNAKE_CASE_: int ) -> int:
return int(1_2_8 + factor * (c - 1_2_8) )
return img.point(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
lowerCAmelCase__ = change_contrast(img, 1_7_0)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 514 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('google/mt5-small' )
lowerCamelCase_ = tokenizer('Hello there' , return_tensors='tf' ).input_ids
lowerCamelCase_ = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
lowerCamelCase_ = model(lowerCamelCase_ , labels=lowerCamelCase_ ).loss
lowerCamelCase_ = -tf.math.reduce_mean(lowerCamelCase_ ).numpy()
lowerCamelCase_ = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 ) | 710 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = GPTaTokenizer
SCREAMING_SNAKE_CASE_ = GPTaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = {'add_prefix_space': True}
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = 'lower newer'
return input_text, output_text
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'lower newer'
# Testing tokenization
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing the unknown token
lowerCamelCase_ = tokens + [rust_tokenizer.unk_token]
lowerCamelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Simple input
lowerCamelCase_ = 'This is a simple input'
lowerCamelCase_ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase_ = ('This is a simple input', 'This is a pair')
lowerCamelCase_ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
lowerCamelCase_ = 'This is a simple input'
lowerCamelCase_ = ['This is a simple input looooooooong', 'This is a simple input']
lowerCamelCase_ = ('This is a simple input', 'This is a pair')
lowerCamelCase_ = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowerCamelCase_ = tokenizer.pad_token_id
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=30 , return_tensors='np' )
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
lowerCamelCase_ = tokenizer(*SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=60 , return_tensors='np' )
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = '$$$'
lowerCamelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE_ , add_bos_token=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'This is a simple input'
lowerCamelCase_ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase_ = tokenizer.bos_token_id
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase_ = tokenizer.decode(out_s.input_ids )
lowerCamelCase_ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , add_bos_token=SCREAMING_SNAKE_CASE_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase_ = 'Encode this.'
lowerCamelCase_ = 'This one too please.'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = encoded_sequence_dict['input_ids']
lowerCamelCase_ = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE_ )
]
lowerCamelCase_ = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'A photo of a cat'
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('./test_opt' )
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [2, 250, 1345, 9, 10, 4758] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'A photo of a cat'
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE_ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'bos'
lowerCamelCase_ = tokenizer.get_vocab()['bos']
lowerCamelCase_ = 'A photo of a cat'
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE_ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [31957, 250, 1345, 9, 10, 4758] )
| 384 | 0 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def __lowerCAmelCase ( __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Tuple[int] ):
'''simple docstring'''
assert len(__UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any=0.999 , __UpperCamelCase : Optional[int]=jnp.floataa ):
'''simple docstring'''
def alpha_bar(__UpperCamelCase : Optional[int] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
snake_case_ : Optional[Any] = []
for i in range(__UpperCamelCase ):
snake_case_ : Dict = i / num_diffusion_timesteps
snake_case_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) , __UpperCamelCase ) )
return jnp.array(__UpperCamelCase , dtype=__UpperCamelCase )
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ : Tuple = state.alphas_cumprod
snake_case_ : Optional[int] = alphas_cumprod[timesteps] ** 0.5
snake_case_ : Dict = sqrt_alpha_prod.flatten()
snake_case_ : int = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
snake_case_ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
snake_case_ : Dict = sqrt_one_minus_alpha_prod.flatten()
snake_case_ : Tuple = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : str = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 58 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE ={
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 425 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : Tuple ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : str=13 ,lowerCamelCase__ : Union[str, Any]=7 ,lowerCamelCase__ : List[str]=14 ,lowerCamelCase__ : Tuple=10 ,lowerCamelCase__ : int=19 ,lowerCamelCase__ : Optional[int]=5 ,lowerCamelCase__ : str=4 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : str=16 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : List[str]=4 ,lowerCamelCase__ : Union[str, Any]=4 ,lowerCamelCase__ : Optional[int]="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : int=[1, 2, 3, 4, 5] ,lowerCamelCase__ : Any=25 ,lowerCamelCase__ : List[Any]=5 ,) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = prediction_length
SCREAMING_SNAKE_CASE = context_length
SCREAMING_SNAKE_CASE = cardinality
SCREAMING_SNAKE_CASE = num_time_features
SCREAMING_SNAKE_CASE = lags_sequence
SCREAMING_SNAKE_CASE = embedding_dimension
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = context_length
SCREAMING_SNAKE_CASE = prediction_length + label_length
SCREAMING_SNAKE_CASE = label_length
SCREAMING_SNAKE_CASE = moving_average
SCREAMING_SNAKE_CASE = autocorrelation_factor
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,prediction_length=self.prediction_length ,context_length=self.context_length ,label_length=self.label_length ,lags_sequence=self.lags_sequence ,num_time_features=self.num_time_features ,num_static_categorical_features=1 ,cardinality=[self.cardinality] ,embedding_dimension=[self.embedding_dimension] ,moving_average=self.moving_average ,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = config.context_length + max(config.lags_sequence )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, 1] ,config.cardinality[0] )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, _past_length] )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, config.prediction_length] )
SCREAMING_SNAKE_CASE = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_config()
SCREAMING_SNAKE_CASE = self.prepare_autoformer_inputs_dict(lowerCamelCase__ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : str ,lowerCamelCase__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoformerModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
SCREAMING_SNAKE_CASE = model(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = model.get_encoder()
encoder.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoformerEncoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = model.create_network_inputs(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
SCREAMING_SNAKE_CASE = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) ,dim=-1 ,)
SCREAMING_SNAKE_CASE = encoder(inputs_embeds=lowerCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
SCREAMING_SNAKE_CASE = (
torch.mean(transformer_inputs[:, : config.context_length, ...] ,dim=1 )
.unsqueeze(1 )
.repeat(1 ,config.prediction_length ,1 )
)
SCREAMING_SNAKE_CASE = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] ,device=enc_input.device ,)
SCREAMING_SNAKE_CASE = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) ,dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) ,dim=-1 ,)
SCREAMING_SNAKE_CASE = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) ,dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) ,dim=-1 ,)
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = model.get_decoder()
decoder.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoformerDecoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = decoder(
trend=lowerCamelCase__ ,inputs_embeds=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : str = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__snake_case : Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
__snake_case : str = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__snake_case : Optional[Any] = False
__snake_case : List[Any] = False
__snake_case : Tuple = False
__snake_case : Union[str, Any] = False
__snake_case : Union[str, Any] = False
__snake_case : Dict = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = model_class.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertEqual(info["""missing_keys"""] ,[] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = inspect.signature(getattr(lowerCamelCase__ ,"""forward""" ) )
# The main input is the name of the argument after `self`
SCREAMING_SNAKE_CASE = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(lowerCamelCase__ )] ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""seq_length""" ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""decoder_seq_length""" ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""encoder_seq_length""" ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""d_model""" ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""num_attention_heads""" ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = d_model // num_attention_heads
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
SCREAMING_SNAKE_CASE = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
SCREAMING_SNAKE_CASE = outputs.encoder_attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,)
SCREAMING_SNAKE_CASE = len(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
# decoder attentions
SCREAMING_SNAKE_CASE = outputs.decoder_attentions
self.assertIsInstance(lowerCamelCase__ ,(list, tuple) )
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,)
# cross attentions
SCREAMING_SNAKE_CASE = outputs.cross_attentions
self.assertIsInstance(lowerCamelCase__ ,(list, tuple) )
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,)
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(out_len + 2 ,len(lowerCamelCase__ ) )
SCREAMING_SNAKE_CASE = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,)
@is_flaky()
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __lowercase ( _SCREAMING_SNAKE_CASE="train-batch.pt" ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
return batch
@require_torch
@slow
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = prepare_batch()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(
past_values=batch["""past_values"""] ,past_time_features=batch["""past_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,static_categorical_features=batch["""static_categorical_features"""] ,future_values=batch["""future_values"""] ,future_time_features=batch["""future_time_features"""] ,)[0]
SCREAMING_SNAKE_CASE = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] ,device=lowerCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] ,lowerCamelCase__ ,atol=lowerCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(
past_values=batch["""past_values"""] ,past_time_features=batch["""past_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,static_categorical_features=batch["""static_categorical_features"""] ,).encoder_last_hidden_state
SCREAMING_SNAKE_CASE = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] ,device=lowerCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] ,lowerCamelCase__ ,atol=lowerCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model.generate(
static_categorical_features=batch["""static_categorical_features"""] ,past_time_features=batch["""past_time_features"""] ,past_values=batch["""past_values"""] ,future_time_features=batch["""future_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,)
SCREAMING_SNAKE_CASE = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor([3130.6763, 4056.5293, 7053.0786] ,device=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] ,lowerCamelCase__ ,rtol=1e-1 ) )
| 706 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = "summarization"
__snake_case : List[str] = ["loss"]
__snake_case : Optional[int] = ROUGE_KEYS
__snake_case : List[Any] = "rouge2"
def __init__( self : Union[str, Any] ,lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowerCamelCase__ ,num_labels=lowerCamelCase__ ,mode=self.mode ,**lowerCamelCase__ )
use_task_specific_params(self.model ,"""summarization""" )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE = Path(self.output_dir ) / """metrics.json"""
SCREAMING_SNAKE_CASE = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams ,self.hparams_save_path )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = defaultdict(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.config.model_type
SCREAMING_SNAKE_CASE = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
SCREAMING_SNAKE_CASE = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
SCREAMING_SNAKE_CASE = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE = get_git_info()["""repo_sha"""]
SCREAMING_SNAKE_CASE = hparams.num_workers
SCREAMING_SNAKE_CASE = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE = self.decoder_start_token_id
SCREAMING_SNAKE_CASE = (
SeqaSeqDataset if hasattr(self.tokenizer ,"""prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE = self.model.config.max_length
SCREAMING_SNAKE_CASE = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Dict[str, torch.Tensor] ) -> Dict[str, List[str]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase__ ,Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / """tok_batch.json""" )
SCREAMING_SNAKE_CASE = True
return readable_batch
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : str ,**lowerCamelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.model(lowerCamelCase__ ,**lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : List[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(
lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ,clean_up_tokenization_spaces=lowerCamelCase__ )
return lmap(str.strip ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = batch["""input_ids"""], batch["""attention_mask"""]
SCREAMING_SNAKE_CASE = batch["""labels"""]
if isinstance(self.model ,lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = self.model._shift_right(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = shift_tokens_right(lowerCamelCase__ ,lowerCamelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE = decoder_input_ids
self.save_readable_batch(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,decoder_input_ids=lowerCamelCase__ ,use_cache=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE = nn.CrossEntropyLoss(ignore_index=lowerCamelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE = nn.functional.log_softmax(lowerCamelCase__ ,dim=-1 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = label_smoothed_nll_loss(
lowerCamelCase__ ,lowerCamelCase__ ,self.hparams.label_smoothing ,ignore_index=lowerCamelCase__ )
return (loss,)
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._step(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = dict(zip(self.loss_names ,lowerCamelCase__ ) )
# tokens per batch
SCREAMING_SNAKE_CASE = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE = batch["""input_ids"""].shape[0]
SCREAMING_SNAKE_CASE = batch["""input_ids"""].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[int] ) -> Dict:
'''simple docstring'''
return self._generative_step(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[str]="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
SCREAMING_SNAKE_CASE = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE = losses["""loss"""]
SCREAMING_SNAKE_CASE = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
SCREAMING_SNAKE_CASE = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE = torch.tensor(lowerCamelCase__ ).type_as(lowerCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
SCREAMING_SNAKE_CASE = self.step_count
self.metrics[prefix].append(lowerCamelCase__ ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"""{prefix}_loss""": loss,
F"""{prefix}_{self.val_metric}""": metric_tensor,
}
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
return calculate_rouge(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : dict ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE = self.model.generate(
batch["""input_ids"""] ,attention_mask=batch["""attention_mask"""] ,use_cache=lowerCamelCase__ ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
SCREAMING_SNAKE_CASE = (time.time() - ta) / batch["""input_ids"""].shape[0]
SCREAMING_SNAKE_CASE = self.ids_to_clean_text(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.ids_to_clean_text(batch["""labels"""] )
SCREAMING_SNAKE_CASE = self._step(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = dict(zip(self.loss_names ,lowerCamelCase__ ) )
SCREAMING_SNAKE_CASE = self.calc_generative_metrics(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = np.mean(lmap(lowerCamelCase__ ,lowerCamelCase__ ) )
base_metrics.update(gen_time=lowerCamelCase__ ,gen_len=lowerCamelCase__ ,preds=lowerCamelCase__ ,target=lowerCamelCase__ ,**lowerCamelCase__ )
return base_metrics
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : List[Any] ) -> Any:
'''simple docstring'''
return self._generative_step(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.validation_epoch_end(lowerCamelCase__ ,prefix="""test""" )
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Tuple ) -> SeqaSeqDataset:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.n_obs[type_path]
SCREAMING_SNAKE_CASE = self.target_lens[type_path]
SCREAMING_SNAKE_CASE = self.dataset_class(
self.tokenizer ,type_path=lowerCamelCase__ ,n_obs=lowerCamelCase__ ,max_target_length=lowerCamelCase__ ,**self.dataset_kwargs ,)
return dataset
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : str ,lowerCamelCase__ : int ,lowerCamelCase__ : bool = False ) -> DataLoader:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_dataset(lowerCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE = dataset.make_sortish_sampler(lowerCamelCase__ ,distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase__ ,batch_size=lowerCamelCase__ ,collate_fn=dataset.collate_fn ,shuffle=lowerCamelCase__ ,num_workers=self.num_workers ,sampler=lowerCamelCase__ ,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase__ ,batch_sampler=lowerCamelCase__ ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
else:
return DataLoader(
lowerCamelCase__ ,batch_size=lowerCamelCase__ ,collate_fn=dataset.collate_fn ,shuffle=lowerCamelCase__ ,num_workers=self.num_workers ,sampler=lowerCamelCase__ ,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> DataLoader:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" ,batch_size=self.hparams.train_batch_size ,shuffle=lowerCamelCase__ )
return dataloader
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""val""" ,batch_size=self.hparams.eval_batch_size )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""test""" ,batch_size=self.hparams.eval_batch_size )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowerCamelCase__ ,lowerCamelCase__ )
add_generic_args(lowerCamelCase__ ,lowerCamelCase__ )
parser.add_argument(
"""--max_source_length""" ,default=1024 ,type=lowerCamelCase__ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--max_target_length""" ,default=56 ,type=lowerCamelCase__ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--val_max_target_length""" ,default=142 ,type=lowerCamelCase__ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--test_max_target_length""" ,default=142 ,type=lowerCamelCase__ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument("""--freeze_encoder""" ,action="""store_true""" )
parser.add_argument("""--freeze_embeds""" ,action="""store_true""" )
parser.add_argument("""--sortish_sampler""" ,action="""store_true""" ,default=lowerCamelCase__ )
parser.add_argument("""--overwrite_output_dir""" ,action="""store_true""" ,default=lowerCamelCase__ )
parser.add_argument("""--max_tokens_per_batch""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ )
parser.add_argument("""--logger_name""" ,type=lowerCamelCase__ ,choices=["""default""", """wandb""", """wandb_shared"""] ,default="""default""" )
parser.add_argument("""--n_train""" ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" ,type=lowerCamelCase__ ,default=500 ,required=lowerCamelCase__ ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" ,type=lowerCamelCase__ ,default="""summarization""" ,required=lowerCamelCase__ ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" ,type=lowerCamelCase__ ,default=0.0 ,required=lowerCamelCase__ )
parser.add_argument("""--src_lang""" ,type=lowerCamelCase__ ,default="""""" ,required=lowerCamelCase__ )
parser.add_argument("""--tgt_lang""" ,type=lowerCamelCase__ ,default="""""" ,required=lowerCamelCase__ )
parser.add_argument("""--eval_beams""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,required=lowerCamelCase__ )
parser.add_argument(
"""--val_metric""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,required=lowerCamelCase__ ,choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" ,type=lowerCamelCase__ ,default=1 ,required=lowerCamelCase__ ,help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) ,)
return parser
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = "translation"
__snake_case : Tuple = ["loss"]
__snake_case : Union[str, Any] = ["bleu"]
__snake_case : str = "bleu"
def __init__( self : str ,lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
super().__init__(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hparams.src_lang
SCREAMING_SNAKE_CASE = hparams.tgt_lang
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ) -> dict:
'''simple docstring'''
return calculate_bleu(lowerCamelCase__ ,lowerCamelCase__ )
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> SummarizationModule:
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
check_output_dir(_SCREAMING_SNAKE_CASE , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE = SummarizationModule(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE = TranslationModule(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
SCREAMING_SNAKE_CASE = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE = os.environ.get("""WANDB_PROJECT""" , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = WandbLogger(name=model.output_dir.name , project=_SCREAMING_SNAKE_CASE )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = args.val_metric == """loss"""
SCREAMING_SNAKE_CASE = generic_train(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _SCREAMING_SNAKE_CASE ) , early_stopping_callback=_SCREAMING_SNAKE_CASE , logger=_SCREAMING_SNAKE_CASE , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE = """"""
SCREAMING_SNAKE_CASE = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=_SCREAMING_SNAKE_CASE ) )
if checkpoints:
SCREAMING_SNAKE_CASE = checkpoints[-1]
SCREAMING_SNAKE_CASE = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE_ = pl.Trainer.add_argparse_args(parser)
SCREAMING_SNAKE_CASE_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE_ = parser.parse_args()
main(args)
| 116 | 0 |
'''simple docstring'''
from statistics import mean
import numpy as np
def _A ( A ,A ,A ,A ) -> Optional[int]:
lowercase : Dict = 0
# Number of processes finished
lowercase : Tuple = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
lowercase : Any = [0] * no_of_process
# List to include calculation results
lowercase : Union[str, Any] = [0] * no_of_process
# Sort by arrival time.
lowercase : Union[str, Any] = [burst_time[i] for i in np.argsort(a__ )]
lowercase : Union[str, Any] = [process_name[i] for i in np.argsort(a__ )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowercase : Optional[Any] = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowercase : Optional[Any] = arrival_time[i]
lowercase : Optional[Any] = 0
# Index showing the location of the process being performed
lowercase : Optional[int] = 0
# Saves the current response ratio.
lowercase : Tuple = 0
for i in range(0 ,a__ ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
lowercase : List[Any] = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
lowercase : int = temp
lowercase : List[str] = i
# Calculate the turn around time
lowercase : int = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
lowercase : Dict = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def _A ( A ,A ,A ,A ) -> Tuple:
lowercase : Tuple = [0] * no_of_process
for i in range(0 ,a__ ):
lowercase : Optional[int] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = 5
lowerCAmelCase : Union[str, Any] = ["""A""", """B""", """C""", """D""", """E"""]
lowerCAmelCase : int = [1, 2, 3, 4, 5]
lowerCAmelCase : Optional[int] = [1, 2, 3, 4, 5]
lowerCAmelCase : Tuple = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCAmelCase : str = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 372 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _UpperCAmelCase ( a__ , a__ , a__):
'''simple docstring'''
a_ : List[Any] = 0
if start < end:
a_ : Dict = randint(a__ , a__)
a_ : List[str] = a[end]
a_ : Tuple = a[pivot]
a_ : Tuple = temp
a_ , a_ : List[Any] = _in_place_partition(a__ , a__ , a__)
count += _in_place_quick_sort(a__ , a__ , p - 1)
count += _in_place_quick_sort(a__ , p + 1 , a__)
return count
def _UpperCAmelCase ( a__ , a__ , a__):
'''simple docstring'''
a_ : Optional[int] = 0
a_ : Union[str, Any] = randint(a__ , a__)
a_ : Union[str, Any] = a[end]
a_ : Any = a[pivot]
a_ : Any = temp
a_ : int = start - 1
for index in range(a__ , a__):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
a_ : str = new_pivot_index + 1
a_ : Optional[Any] = a[new_pivot_index]
a_ : str = a[index]
a_ : Union[str, Any] = temp
a_ : Union[str, Any] = a[new_pivot_index + 1]
a_ : Tuple = a[end]
a_ : Any = temp
return new_pivot_index + 1, count
__snake_case : Union[str, Any] = TemporaryFile()
__snake_case : Dict = 1_00 # 1000 elements are to be sorted
__snake_case , __snake_case : int = 0, 1 # mean and standard deviation
__snake_case : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
__snake_case : str = np.load(outfile)
__snake_case : Dict = len(M) - 1
__snake_case : Dict = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 540 | 0 |
def A ( UpperCAmelCase ):
if n == 1 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
return 0
elif n == 2:
return 1
else:
_snake_case : List[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def A ( UpperCAmelCase ):
_snake_case : Tuple = 0
_snake_case : Optional[Any] = 2
while digits < n:
index += 1
_snake_case : str = len(str(fibonacci(UpperCAmelCase ) ) )
return index
def A ( UpperCAmelCase = 1_000 ):
return fibonacci_digits_index(UpperCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 278 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def A ( UpperCAmelCase ):
if hor == 128:
_snake_case : int = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
_snake_case : Tuple = (32, 128, 256)
_snake_case : str = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
_snake_case : List[Any] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
_snake_case : Optional[int] = (32, 64, 128, 256)
_snake_case : int = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
_snake_case : Dict = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
_snake_case : Dict = model.state_dict()
_snake_case : Tuple = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65_536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
_snake_case : Any = UNetaDModel(**UpperCAmelCase )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
_snake_case : List[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_snake_case : Optional[int] = state_dict.pop(UpperCAmelCase )
hf_value_function.load_state_dict(UpperCAmelCase )
torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , "w" ) as f:
json.dump(UpperCAmelCase , UpperCAmelCase )
def A ( ):
_snake_case : Any = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65_536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
_snake_case : Dict = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
_snake_case : Optional[int] = model
_snake_case : List[str] = UNetaDModel(**UpperCAmelCase )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
_snake_case : Optional[int] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_snake_case : Any = state_dict.pop(UpperCAmelCase )
hf_value_function.load_state_dict(UpperCAmelCase )
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
json.dump(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function() | 278 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCamelCase : Union[str, Any] =logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
lowerCamelCase : List[str] ={
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCamelCase : Optional[int] ={
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCamelCase : Any =sorted(arg_to_scheduler.keys())
lowerCamelCase : str ='''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class __a ( pl.LightningModule ):
def __init__( self : str , SCREAMING_SNAKE_CASE : argparse.Namespace , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Tuple="base" , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , **SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : int = Path(self.hparams.output_dir )
UpperCamelCase__ : List[Any] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCamelCase__ : Tuple = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
else:
UpperCamelCase__ : PretrainedConfig = config
UpperCamelCase__ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert hasattr(self.config , SCREAMING_SNAKE_CASE ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , SCREAMING_SNAKE_CASE , getattr(self.hparams , SCREAMING_SNAKE_CASE ) )
if tokenizer is None:
UpperCamelCase__ : str = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=SCREAMING_SNAKE_CASE , )
else:
UpperCamelCase__ : PreTrainedTokenizer = tokenizer
UpperCamelCase__ : str = MODEL_MODES[mode]
if model is None:
UpperCamelCase__ : Optional[int] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=SCREAMING_SNAKE_CASE , )
else:
UpperCamelCase__ : Tuple = model
def __lowercase ( self : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.model_type.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : str = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCamelCase__ : Any = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
UpperCamelCase__ : List[str] = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.model
UpperCamelCase__ : Tuple = ["bias", "LayerNorm.weight"]
UpperCamelCase__ : Tuple = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
UpperCamelCase__ : List[Any] = Adafactor(
SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=SCREAMING_SNAKE_CASE , relative_step=SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ : Dict = AdamW(
SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
UpperCamelCase__ : List[Any] = optimizer
UpperCamelCase__ : Optional[int] = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
return self.validation_step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return self.validation_end(SCREAMING_SNAKE_CASE )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : Dict = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCamelCase__ : Optional[Any] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
if stage == "test":
UpperCamelCase__ : Tuple = len(self.test_dataloader().dataset )
else:
UpperCamelCase__ : List[Any] = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = len(self.train_dataloader().dataset )
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
raise NotImplementedError("You must implement this for your task" )
def __lowercase ( self : Dict ):
'''simple docstring'''
return self.train_loader
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=SCREAMING_SNAKE_CASE )
def __lowercase ( self : str ):
'''simple docstring'''
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
SCREAMING_SNAKE_CASE , list(filter(SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Dict[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.output_dir.joinpath("best_tfmr" )
UpperCamelCase__ : Optional[Any] = self.step_count
self.model.save_pretrained(SCREAMING_SNAKE_CASE )
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
@staticmethod
def __lowercase ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
parser.add_argument(
"--model_name_or_path" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=SCREAMING_SNAKE_CASE , help="Pretrained config name or path if not the same as model_name" )
parser.add_argument(
"--tokenizer_name" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(SCREAMING_SNAKE_CASE ).parent / "test_run" / "cache" ) , type=SCREAMING_SNAKE_CASE , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=SCREAMING_SNAKE_CASE , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=SCREAMING_SNAKE_CASE , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=SCREAMING_SNAKE_CASE , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=SCREAMING_SNAKE_CASE , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5e-5 , type=SCREAMING_SNAKE_CASE , help="The initial learning rate for Adam." )
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=SCREAMING_SNAKE_CASE , metavar=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=SCREAMING_SNAKE_CASE , help="Weight decay if we apply some." )
parser.add_argument("--adam_epsilon" , default=1e-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--num_workers" , default=4 , type=SCREAMING_SNAKE_CASE , help="kwarg passed to DataLoader" )
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=SCREAMING_SNAKE_CASE )
parser.add_argument("--train_batch_size" , default=32 , type=SCREAMING_SNAKE_CASE )
parser.add_argument("--eval_batch_size" , default=32 , type=SCREAMING_SNAKE_CASE )
parser.add_argument("--adafactor" , action="store_true" )
class __a ( pl.Callback ):
def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __a ( pl.Callback ):
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(SCREAMING_SNAKE_CASE )
class __a ( pl.Callback ):
def __lowercase ( self : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = trainer.lr_schedulers[0]["scheduler"]
UpperCamelCase__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : pl.Trainer , SCREAMING_SNAKE_CASE : pl.LightningModule ):
'''simple docstring'''
rank_zero_info("***** Validation results *****" )
UpperCamelCase__ : Dict = trainer.callback_metrics
# Log results
for key in sorted(SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : pl.Trainer , SCREAMING_SNAKE_CASE : pl.LightningModule ):
'''simple docstring'''
rank_zero_info("***** Test results *****" )
UpperCamelCase__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
UpperCamelCase__ : Optional[int] = os.path.join(pl_module.hparams.output_dir , "test_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
for key in sorted(SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
writer.write("{} = {}\n".format(SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir" , default=str(Path(__lowerCAmelCase ).parent / "test_run" / "model_checkpoints" ) , type=__lowerCAmelCase , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=__lowerCAmelCase , default="O2" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=__lowerCAmelCase )
parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=__lowerCAmelCase , help="Max gradient norm" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
parser.add_argument(
"--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=__lowerCAmelCase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--seed" , type=__lowerCAmelCase , default=42 , help="random seed for initialization" )
parser.add_argument(
"--data_dir" , default=str(Path(__lowerCAmelCase ).parent / "test_run" / "dummy-train-data" ) , type=__lowerCAmelCase , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[] , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ) -> List[str]:
pl.seed_everything(args.seed )
# init model
UpperCamelCase__ : Optional[Any] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
UpperCamelCase__ : List[str] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__lowerCAmelCase )
if logging_callback is None:
UpperCamelCase__ : str = LoggingCallback()
UpperCamelCase__ : Optional[int] = {}
if args.fpaa:
UpperCamelCase__ : Union[str, Any] = 16
if args.gpus > 1:
UpperCamelCase__ : List[str] = "auto"
UpperCamelCase__ : int = "ddp"
UpperCamelCase__ : str = args.accumulate_grad_batches
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : int = "auto"
UpperCamelCase__ : Tuple = pl.Trainer.from_argparse_args(
__lowerCAmelCase , weights_summary=__lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **__lowerCAmelCase , )
if args.do_train:
trainer.fit(__lowerCAmelCase )
else:
print("RAG modeling tests with new set functions successfuly executed!" )
return trainer | 228 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a ( unittest.TestCase ):
@property
def __lowercase ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.dummy_uncond_unet
UpperCamelCase__ : List[str] = ScoreSdeVeScheduler()
UpperCamelCase__ : Union[str, Any] = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
sde_ve.to(SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE ).images
UpperCamelCase__ : str = torch.manual_seed(0 )
UpperCamelCase__ : Any = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE )[
0
]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __a ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = "google/ncsnpp-church-256"
UpperCamelCase__ : Tuple = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
sde_ve.to(SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = torch.manual_seed(0 )
UpperCamelCase__ : Tuple = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE ).images
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCamelCase__ : Any = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 228 | 1 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
A = 'naver-clova-ix/donut-base'
class UpperCAmelCase__ ( unittest.TestCase ):
def A_ ( self : int ) -> str:
'''simple docstring'''
A = DonutProcessor.from_pretrained(snake_case )
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
A = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
A = self.processor.tokenajson(snake_case )
self.assertDictEqual(snake_case , snake_case )
| 713 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ : int = MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase_ : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING
def A_ ( self : Tuple ) -> int:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def A_ ( self : int ) -> Optional[int]:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
A = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1E-0_5, 'token': 38_015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-0_5, 'token': 25_506, 'token_str': ' accuser'},
] , )
A = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-0_5,
'token': 38_015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-0_5,
'token': 25_506,
'token_str': ' accuser',
},
] , )
A = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2E-0_5, 'token': 13_606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-0_5, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-0_5, 'token': 2_941, 'token_str': ' Te'},
] , )
@require_torch
def A_ ( self : str ) -> int:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
A = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2E-0_5, 'token': 35_676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-0_5, 'token': 16_416, 'token_str': 'ELS'},
] , )
A = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-0_5,
'token': 35_676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-0_5, 'token': 16_416, 'token_str': 'ELS'},
] , )
A = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1E-0_5, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-0_5, 'token': 2_941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-0_5, 'token': 13_606, 'token_str': ' Clara'},
] , )
A = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
[
{
'score': 2.2E-0_5,
'token': 35_676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-0_5, 'token': 16_416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-0_5,
'token': 35_676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-0_5, 'token': 16_416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
A = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
A = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(snake_case , snake_case )
@slow
@require_torch
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(snake_case )
@slow
@require_tf
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(snake_case )
def A_ ( self : Dict , snake_case : List[Any] ) -> Optional[int]:
'''simple docstring'''
A = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(snake_case ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1_573, 'token_str': ' Chris'},
] , )
A = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(snake_case ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2_201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 12_790,
'token_str': ' Lyon',
},
] , )
A = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 13_606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2_941, 'token_str': ' Te'},
] , )
@require_torch
def A_ ( self : List[str] ) -> Any:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
A = None
A = None
self.run_pipeline_test(snake_case , [] )
@require_tf
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
A = None
A = None
self.run_pipeline_test(snake_case , [] )
def A_ ( self : str , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def A_ ( self : Any , snake_case : Any , snake_case : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A = fill_masker.tokenizer
A = fill_masker.model
A = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
snake_case , [
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
] , )
with self.assertRaises(snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(snake_case ):
fill_masker('This is' )
self.run_test_top_k(snake_case , snake_case )
self.run_test_targets(snake_case , snake_case )
self.run_test_top_k_targets(snake_case , snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(snake_case , snake_case )
self.fill_mask_with_multiple_masks(snake_case , snake_case )
def A_ ( self : str , snake_case : Any , snake_case : Optional[int] ) -> str:
'''simple docstring'''
A = tokenizer.get_vocab()
A = sorted(vocab.keys() )[:2]
# Pipeline argument
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case , targets=snake_case )
A = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , snake_case )
A = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(snake_case ) )
# Call argument
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , snake_case )
A = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(snake_case ) )
# Score equivalence
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
A = [top_mask['token_str'] for top_mask in outputs]
A = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case ) == set(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
A = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
# Raises with invalid
with self.assertRaises(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
def A_ ( self : Any , snake_case : Optional[Any] , snake_case : int ) -> List[Any]:
'''simple docstring'''
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case , top_k=2 )
A = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
def A_ ( self : str , snake_case : List[str] , snake_case : Dict ) -> Tuple:
'''simple docstring'''
A = tokenizer.get_vocab()
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
# top_k=2, ntargets=3
A = sorted(vocab.keys() )[:3]
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=snake_case )
# If we use the most probably targets, and filter differently, we should still
# have the same results
A = [el['token_str'] for el in sorted(snake_case , key=lambda snake_case : x["score"] , reverse=snake_case )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case ).issubset(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
def A_ ( self : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Dict ) -> int:
'''simple docstring'''
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = tokenizer.get_vocab()
# String duplicates + id duplicates
A = sorted(vocab.keys() )[:3]
A = [targets[0], targets[1], targets[0], targets[2], targets[1]]
A = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=snake_case , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(snake_case ) , 3 )
def A_ ( self : str , snake_case : List[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case , [
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
] , )
| 109 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _a ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(A ):
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(A )
self.assertIsNotNone(A )
self.assertIsInstance(A, A )
SCREAMING_SNAKE_CASE : Tuple = FlaxAutoModel.from_pretrained(A )
self.assertIsNotNone(A )
self.assertIsInstance(A, A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(A ):
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(A )
self.assertIsNotNone(A )
self.assertIsInstance(A, A )
SCREAMING_SNAKE_CASE : str = FlaxAutoModel.from_pretrained(A )
self.assertIsNotNone(A )
self.assertIsInstance(A, A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(A )
SCREAMING_SNAKE_CASE : List[str] = FlaxBertModel.from_pretrained(A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX )
@jax.jit
def eval(**A ):
return model(**A )
eval(**A ).block_until_ready()
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = FlaxRobertaModel.from_pretrained(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX )
@jax.jit
def eval(**A ):
return model(**A )
eval(**A ).block_until_ready()
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A, 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxAutoModel.from_pretrained(A, revision='aaaaaa' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A, 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack', ):
SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(A, 'Use `from_pt=True` to load this model' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 28 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ) -> Optional[int]:
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_snake_case = mst(__lowercase )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result | 686 | 0 |
def _lowerCAmelCase( __A ):
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCAmelCase = len(bin(__A )[3:] )
UpperCAmelCase = bin(abs(__A ) - (1 << binary_number_length) )[3:]
UpperCAmelCase = (
(
"1"
+ "0" * (binary_number_length - len(__A ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
def _lowerCAmelCase( __A , __A , __A ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__A , n - 1 , __A ) * a) % mod
else:
UpperCAmelCase = binary_exponentiation(__A , n / 2 , __A )
return (b * b) % mod
# a prime number
lowerCAmelCase__ = 701
lowerCAmelCase__ = 1000000000
lowerCAmelCase__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 1 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
) | 52 |
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> list:
a = False
while is_sorted is False: # Until all the indices are traversed keep looping
a = True
for i in range(0 , len(__UpperCamelCase) - 1 , 2): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
a , a = input_list[i + 1], input_list[i]
# swapping if elements not in order
a = False
for i in range(1 , len(__UpperCamelCase) - 1 , 2): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
a , a = input_list[i + 1], input_list[i]
# swapping if elements not in order
a = False
return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
lowercase__ : Dict = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowercase__ : Any = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 515 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
A = 1.054_571_817E-34 # unit of ℏ : J * s
A = 3E8 # unit of c : m * s^-1
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
A = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
A = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
A = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCAmelCase__ ( ) -> Optional[Any]:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
A = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , lowerCamelCase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCAmelCase__ ( ) -> str:
assert _test_patching.open is open
A = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , lowerCamelCase__ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCAmelCase__ ( ) -> List[Any]:
# pandas.read_csv is not present in _test_patching
A = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , lowerCamelCase__ ):
pass
def lowerCAmelCase__ ( ) -> Union[str, Any]:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
A = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , lowerCamelCase__ ) is None
with patch_submodule(_test_patching , 'len' , lowerCamelCase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCAmelCase__ ( ) -> Union[str, Any]:
A = '__test_patch_submodule_start_and_stop_mock__'
A = patch_submodule(_test_patching , 'open' , lowerCamelCase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCAmelCase__ ( ) -> int:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
A = '__test_patch_submodule_successive_join__'
A = '__test_patch_submodule_successive_dirname__'
A = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.rename' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.join' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCAmelCase__ ( ) -> Optional[Any]:
A = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , lowerCamelCase__ ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , lowerCamelCase__ ):
pass
| 109 | 1 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : Any , __A : Optional[Any] , __A : List[Any] ):
def get_masked_lm_array(__A : str ):
a_ : Any = f'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'
a_ : Dict = tf.train.load_variable(a_ , a_ )
if "kernel" in name:
a_ : Tuple = array.transpose()
return torch.from_numpy(a_ )
def get_encoder_array(__A : List[str] ):
a_ : str = f'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'
a_ : List[str] = tf.train.load_variable(a_ , a_ )
if "kernel" in name:
a_ : Dict = array.transpose()
return torch.from_numpy(a_ )
def get_encoder_layer_array(__A : List[str] , __A : Union[str, Any] ):
a_ : Optional[int] = f'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'
a_ : str = tf.train.load_variable(a_ , a_ )
if "kernel" in name:
a_ : Dict = array.transpose()
return torch.from_numpy(a_ )
def get_encoder_attention_layer_array(__A : Optional[int] , __A : Tuple , __A : Optional[Any] ):
a_ : Optional[int] = f'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'
a_ : str = tf.train.load_variable(a_ , a_ )
a_ : List[Any] = array.reshape(a_ )
if "kernel" in name:
a_ : Any = array.transpose()
return torch.from_numpy(a_ )
print(f'Loading model based on config from {config_path}...' )
a_ : Any = BertConfig.from_json_file(a_ )
a_ : int = BertForMaskedLM(a_ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
a_ : Optional[Any] = model.bert.encoder.layer[layer_index]
# Self-attention
a_ : List[str] = layer.attention.self
a_ : int = get_encoder_attention_layer_array(
a_ , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
a_ : Dict = get_encoder_attention_layer_array(
a_ , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
a_ : int = get_encoder_attention_layer_array(
a_ , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
a_ : List[str] = get_encoder_attention_layer_array(
a_ , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
a_ : str = get_encoder_attention_layer_array(
a_ , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
a_ : Optional[Any] = get_encoder_attention_layer_array(
a_ , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
a_ : int = layer.attention.output
a_ : List[Any] = get_encoder_attention_layer_array(
a_ , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
a_ : int = get_encoder_attention_layer_array(
a_ , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
a_ : Union[str, Any] = get_encoder_layer_array(a_ , '''_attention_layer_norm/gamma''' )
a_ : Union[str, Any] = get_encoder_layer_array(a_ , '''_attention_layer_norm/beta''' )
# Intermediate
a_ : Dict = layer.intermediate
a_ : Union[str, Any] = get_encoder_layer_array(a_ , '''_intermediate_dense/kernel''' )
a_ : Any = get_encoder_layer_array(a_ , '''_intermediate_dense/bias''' )
# Output
a_ : List[str] = layer.output
a_ : Tuple = get_encoder_layer_array(a_ , '''_output_dense/kernel''' )
a_ : int = get_encoder_layer_array(a_ , '''_output_dense/bias''' )
a_ : Any = get_encoder_layer_array(a_ , '''_output_layer_norm/gamma''' )
a_ : str = get_encoder_layer_array(a_ , '''_output_layer_norm/beta''' )
# Embeddings
a_ : Optional[Any] = get_encoder_array('''_position_embedding_layer/embeddings''' )
a_ : List[Any] = get_encoder_array('''_type_embedding_layer/embeddings''' )
a_ : Dict = get_encoder_array('''_embedding_norm_layer/gamma''' )
a_ : Optional[int] = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
a_ : str = model.cls.predictions.transform
a_ : List[Any] = get_masked_lm_array('''dense/kernel''' )
a_ : int = get_masked_lm_array('''dense/bias''' )
a_ : Union[str, Any] = get_masked_lm_array('''layer_norm/gamma''' )
a_ : str = get_masked_lm_array('''layer_norm/beta''' )
a_ : Optional[Any] = get_masked_lm_array('''embedding_table''' )
# Pooling
a_ : Optional[int] = BertPooler(config=a_ )
a_ : Optional[Any] = get_encoder_array('''_pooler_layer/kernel''' )
a_ : List[str] = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(a_ )
# Integration test - should load without any errors ;)
a_ : List[Any] = BertForMaskedLM.from_pretrained(a_ )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
__lowerCAmelCase = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 466 |
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
def update_area_of_max_square(a_ , a_ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowerCAmelCase_ = update_area_of_max_square(a_ , col + 1 )
lowerCAmelCase_ = update_area_of_max_square(row + 1 , col + 1 )
lowerCAmelCase_ = update_area_of_max_square(row + 1 , a_ )
if mat[row][col]:
lowerCAmelCase_ = 1 + min([right, diagonal, down] )
lowerCAmelCase_ = max(largest_square_area[0] , a_ )
return sub_problem_sol
else:
return 0
lowerCAmelCase_ = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
def update_area_of_max_square_using_dp_array(
a_ , a_ , a_ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowerCAmelCase_ = update_area_of_max_square_using_dp_array(a_ , col + 1 , a_ )
lowerCAmelCase_ = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , a_ )
lowerCAmelCase_ = update_area_of_max_square_using_dp_array(row + 1 , a_ , a_ )
if mat[row][col]:
lowerCAmelCase_ = 1 + min([right, diagonal, down] )
lowerCAmelCase_ = max(largest_square_area[0] , a_ )
lowerCAmelCase_ = sub_problem_sol
return sub_problem_sol
else:
return 0
lowerCAmelCase_ = [0]
lowerCAmelCase_ = [[-1] * cols for _ in range(a_ )]
update_area_of_max_square_using_dp_array(0 , 0 , a_ )
return largest_square_area[0]
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
lowerCAmelCase_ = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowerCAmelCase_ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowerCAmelCase_ = dp_array[row][col + 1]
lowerCAmelCase_ = dp_array[row + 1][col + 1]
lowerCAmelCase_ = dp_array[row + 1][col]
if mat[row][col] == 1:
lowerCAmelCase_ = 1 + min(a_ , a_ , a_ )
lowerCAmelCase_ = max(dp_array[row][col] , a_ )
else:
lowerCAmelCase_ = 0
return largest_square_area
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
lowerCAmelCase_ = [0] * (cols + 1)
lowerCAmelCase_ = [0] * (cols + 1)
lowerCAmelCase_ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowerCAmelCase_ = current_row[col + 1]
lowerCAmelCase_ = next_row[col + 1]
lowerCAmelCase_ = next_row[col]
if mat[row][col] == 1:
lowerCAmelCase_ = 1 + min(a_ , a_ , a_ )
lowerCAmelCase_ = max(current_row[col] , a_ )
else:
lowerCAmelCase_ = 0
lowerCAmelCase_ = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 318 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Dict = {"""vocab_file""": """vocab.txt"""}
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__lowerCamelCase : Optional[int] = {
"""openbmb/cpm-ant-10b""": 10_24,
}
def A__ ( _a : int ):
'''simple docstring'''
snake_case__ : Any =collections.OrderedDict()
with open(_a , """r""" , encoding="""utf-8""" ) as reader:
snake_case__ : str =reader.readlines()
for index, token in enumerate(_a ):
snake_case__ : Dict =token.rstrip("""\n""" )
snake_case__ : Dict =index
return vocab
class _lowercase ( _A ):
def __init__( self , a , a="<unk>" , a=2_0_0 ):
snake_case__ : Any =vocab
snake_case__ : List[str] =unk_token
snake_case__ : int =max_input_chars_per_word
def lowercase__ ( self , a ):
snake_case__ : int =list(a )
if len(a ) > self.max_input_chars_per_word:
return [self.unk_token]
snake_case__ : int =0
snake_case__ : List[str] =[]
while start < len(a ):
snake_case__ : Union[str, Any] =len(a )
snake_case__ : int =None
while start < end:
snake_case__ : Optional[Any] ="""""".join(chars[start:end] )
if substr in self.vocab:
snake_case__ : int =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(a )
snake_case__ : Dict =end
return sub_tokens
class _lowercase ( _A ):
_a : List[str] = VOCAB_FILES_NAMES
_a : Any = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : List[str] = ['input_ids', 'attention_mask']
_a : List[Any] = False
def __init__( self , a , a="<d>" , a="</d>" , a="<s>" , a="</s>" , a="<pad>" , a="<unk>" , a="</n>" , a="</_>" , a="left" , **a , ):
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=a , eod_token=a , bos_token=a , eos_token=a , pad_token=a , unk_token=a , line_token=a , space_token=a , padding_side=a , **a , )
snake_case__ : int =bod_token
snake_case__ : Union[str, Any] =eod_token
snake_case__ : Tuple =load_vocab(a )
snake_case__ : Union[str, Any] =self.encoder[space_token]
snake_case__ : Optional[Any] =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
snake_case__ : List[Any] =collections.OrderedDict(sorted(self.encoder.items() , key=lambda a : x[1] ) )
snake_case__ : str ={v: k for k, v in self.encoder.items()}
snake_case__ : Dict =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowercase__ ( self ):
return self.encoder[self.bod_token]
@property
def lowercase__ ( self ):
return self.encoder[self.eod_token]
@property
def lowercase__ ( self ):
return self.encoder["\n"]
@property
def lowercase__ ( self ):
return len(self.encoder )
def lowercase__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self , a ):
snake_case__ : Dict =[]
for x in jieba.cut(a , cut_all=a ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(a ) )
return output_tokens
def lowercase__ ( self , a , **a ):
snake_case__ : Any =[i for i in token_ids if i >= 0]
snake_case__ : int =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(a , **a )
def lowercase__ ( self , a ):
return token in self.encoder
def lowercase__ ( self , a ):
return "".join(a )
def lowercase__ ( self , a ):
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def lowercase__ ( self , a ):
return self.decoder.get(a , self.unk_token )
def lowercase__ ( self , a , a = None ):
if os.path.isdir(a ):
snake_case__ : Union[str, Any] =os.path.join(
a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
snake_case__ : Dict =(filename_prefix + """-""" if filename_prefix else """""") + save_directory
snake_case__ : Any =0
if " " in self.encoder:
snake_case__ : int =self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
snake_case__ : Tuple =self.encoder["""\n"""]
del self.encoder["\n"]
snake_case__ : Optional[int] =collections.OrderedDict(sorted(self.encoder.items() , key=lambda a : x[1] ) )
with open(a , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
""" Please check that the vocabulary is not corrupted!""" )
snake_case__ : str =token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowercase__ ( self , a , a = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowercase__ ( self , a , a = None , a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a ))
return [1] + ([0] * len(a ))
| 448 |
def A__ ( _a : Optional[Any] , _a : Tuple , _a : List[str]=False ):
'''simple docstring'''
if isinstance(_a , _a ) and isinstance(_a , _a ):
snake_case__ : int =len(set_a.intersection(_a ) )
if alternative_union:
snake_case__ : int =len(_a ) + len(_a )
else:
snake_case__ : Tuple =len(set_a.union(_a ) )
return intersection / union
if isinstance(_a , (list, tuple) ) and isinstance(_a , (list, tuple) ):
snake_case__ : Optional[int] =[element for element in set_a if element in set_b]
if alternative_union:
snake_case__ : Optional[Any] =len(_a ) + len(_a )
return len(_a ) / union
else:
snake_case__ : Dict =set_a + [element for element in set_b if element not in set_a]
return len(_a ) / len(_a )
return len(_a ) / len(_a )
return None
if __name__ == "__main__":
__lowerCamelCase : List[Any] = {"""a""", """b""", """c""", """d""", """e"""}
__lowerCamelCase : Optional[int] = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
| 448 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowercase__:
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str=1_3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=7 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Tuple=9_9 , SCREAMING_SNAKE_CASE_ : List[str]=3_2 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE_ : str=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]="None" , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : List[Any]=4 , SCREAMING_SNAKE_CASE_ : str=None , ) -> str:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = relative_attention
lowercase_ = position_biased_input
lowercase_ = pos_att_type
lowercase_ = scope
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]:
lowercase_ = TFDebertaVaModel(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = [input_ids, input_mask]
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
lowercase_ = TFDebertaVaForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
lowercase_ = self.num_labels
lowercase_ = TFDebertaVaForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
lowercase_ = self.num_labels
lowercase_ = TFDebertaVaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
lowercase_ = TFDebertaVaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : List[str] ) -> Optional[int]:
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :str = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
a :int = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
a :int = False
a :Any = False
def _lowercase ( self : Dict ) -> str:
lowercase_ = TFDebertaVaModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ) -> int:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> str:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : Tuple ) -> Dict:
lowercase_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_tf
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
pass
@slow
def _lowercase ( self : List[Any] ) -> Dict:
lowercase_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
lowercase_ = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowercase_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
| 97 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case:
def __init__(self : Any , a : str , a : Union[str, Any]=12 , a : List[str]=7 , a : Dict=True , a : Tuple=True , a : Any=True , a : Optional[Any]=99 , a : Optional[Any]=32 , a : Tuple=32 , a : List[Any]=2 , a : str=4 , a : Dict=37 , a : Optional[Any]=0.1 , a : List[Any]=0.1 , a : Dict=5_12 , a : List[Any]=0.02 , a : Any=0 , a : Optional[int]=None , ) -> List[Any]:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def _UpperCamelCase (self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
A__ , A__ = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(a )
def _UpperCamelCase (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase (self : Optional[int] , a : Any , a : List[Any] , a : Union[str, Any] ) -> Any:
"""simple docstring"""
A__ = TFBlipTextModel(config=a )
A__ = model(a , attention_mask=a , training=a )
A__ = model(a , training=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase (self : str ) -> List[Any]:
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _snake_case( UpperCAmelCase , unittest.TestCase ):
__snake_case: Optional[int] = (TFBlipTextModel,) if is_tf_available() else ()
__snake_case: Union[str, Any] = False
__snake_case: Any = False
__snake_case: Union[str, Any] = False
def _UpperCamelCase (self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=a , hidden_size=37 )
def _UpperCamelCase (self : Tuple ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase (self : Any ) -> Dict:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase (self : Dict ) -> Dict:
"""simple docstring"""
pass
def _UpperCamelCase (self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase (self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase (self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase (self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def _UpperCamelCase (self : int ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCamelCase (self : str , a : Optional[int]=True ) -> List[Any]:
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=a )
| 531 | 0 |
import math
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Tuple = len(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[str] = int(math.floor(math.sqrt(SCREAMING_SNAKE_CASE_ ) ) )
lowerCamelCase : str = 0
while arr[min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - 1] < x:
lowerCamelCase : Any = step
step += int(math.floor(math.sqrt(SCREAMING_SNAKE_CASE_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowerCamelCase : int = prev + 1
if prev == min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
_snake_case = input('''Enter numbers separated by a comma:\n''').strip()
_snake_case = [int(item) for item in user_input.split(''',''')]
_snake_case = int(input('''Enter the number to be searched:\n'''))
_snake_case = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
| 231 |
from __future__ import annotations
import numpy as np
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase : Dict = np.shape(SCREAMING_SNAKE_CASE_ )
if rows != columns:
lowerCamelCase : int = (
"'table' has to be of square shaped array but got a "
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = np.zeros((rows, columns) )
lowerCamelCase : List[str] = np.zeros((rows, columns) )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Dict = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
lowerCamelCase : Dict = (table[i][j] - total) / upper[j][j]
lowerCamelCase : Dict = 1
for j in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : int = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase : Any = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 | 1 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ = "allenai"
def A_ ( snake_case ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
SCREAMING_SNAKE_CASE:Optional[Any] = dict((re.sub(r"@@$" , "" , snake_case ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , snake_case ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE:Any = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
SCREAMING_SNAKE_CASE:int = d[k] # restore
return da
def A_ ( snake_case , snake_case ):
# prep
assert os.path.exists(snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
SCREAMING_SNAKE_CASE:Any = basename(snake_case )
SCREAMING_SNAKE_CASE:Union[str, Any] = dirname(snake_case )
SCREAMING_SNAKE_CASE:Optional[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE:Optional[Any] = cls.hub_models()
SCREAMING_SNAKE_CASE:List[Any] = {"bpe": "fastbpe", "tokenizer": "moses"}
SCREAMING_SNAKE_CASE:Union[str, Any] = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
SCREAMING_SNAKE_CASE:Tuple = hub_utils.from_pretrained(
snake_case , snake_case , snake_case , archive_map=snake_case , **snake_case )
SCREAMING_SNAKE_CASE:Any = vars(chkpt["args"]["model"] )
SCREAMING_SNAKE_CASE:Tuple = args["source_lang"]
SCREAMING_SNAKE_CASE:List[Any] = args["target_lang"]
SCREAMING_SNAKE_CASE:Tuple = dirname(snake_case )
SCREAMING_SNAKE_CASE:Any = basename(snake_case )
# dicts
SCREAMING_SNAKE_CASE:str = os.path.join(snake_case , F'''dict.{src_lang}.txt''' )
SCREAMING_SNAKE_CASE:Optional[int] = os.path.join(snake_case , F'''dict.{tgt_lang}.txt''' )
SCREAMING_SNAKE_CASE:Optional[Any] = Dictionary.load(snake_case )
SCREAMING_SNAKE_CASE:Optional[Any] = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE:Optional[int] = len(snake_case )
SCREAMING_SNAKE_CASE:List[Any] = os.path.join(snake_case , "vocab-src.json" )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE:List[str] = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE:Any = False
break
SCREAMING_SNAKE_CASE:Union[str, Any] = Dictionary.load(snake_case )
SCREAMING_SNAKE_CASE:Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE:Optional[Any] = len(snake_case )
SCREAMING_SNAKE_CASE:Union[str, Any] = os.path.join(snake_case , "vocab-tgt.json" )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE:Any = os.path.join(snake_case , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE:str = os.path.join(snake_case , snake_case )
if os.path.exists(snake_case ):
break
with open(snake_case , encoding="utf-8" ) as fin:
SCREAMING_SNAKE_CASE:Optional[Any] = fin.read()
SCREAMING_SNAKE_CASE:Any = re.sub(r" \d+$" , "" , snake_case , 0 , re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(snake_case , "w" , encoding="utf-8" ) as fout:
fout.write(snake_case )
# model config
SCREAMING_SNAKE_CASE:int = os.path.join(snake_case , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
SCREAMING_SNAKE_CASE:Optional[int] = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE:Optional[int] = 5
SCREAMING_SNAKE_CASE:Dict = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE:Union[str, Any] = best_score_hparams[model_dir]["length_penalty"]
else:
SCREAMING_SNAKE_CASE:Optional[int] = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# tokenizer config
SCREAMING_SNAKE_CASE:str = os.path.join(snake_case , snake_case )
SCREAMING_SNAKE_CASE:Union[str, Any] = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# model
SCREAMING_SNAKE_CASE:List[Any] = chkpt["models"][0]
SCREAMING_SNAKE_CASE:Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE:Optional[Any] = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE:Dict = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(snake_case , snake_case )
SCREAMING_SNAKE_CASE:int = FSMTConfig.from_pretrained(snake_case )
SCREAMING_SNAKE_CASE:Optional[Any] = FSMTForConditionalGeneration(snake_case )
# check that it loads ok
model_new.load_state_dict(snake_case , strict=snake_case )
# save
SCREAMING_SNAKE_CASE:Tuple = os.path.join(snake_case , snake_case )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(snake_case , snake_case )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 143 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _snake_case ( _a ):
_A : List[str] = '''camembert'''
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=30_522 ,SCREAMING_SNAKE_CASE__ : int=768 ,SCREAMING_SNAKE_CASE__ : List[Any]=12 ,SCREAMING_SNAKE_CASE__ : Any=12 ,SCREAMING_SNAKE_CASE__ : Tuple=3_072 ,SCREAMING_SNAKE_CASE__ : str="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Dict=512 ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,SCREAMING_SNAKE_CASE__ : Tuple=0.02 ,SCREAMING_SNAKE_CASE__ : Any=1e-12 ,SCREAMING_SNAKE_CASE__ : str=1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 ,SCREAMING_SNAKE_CASE__ : Any="absolute" ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE:str = hidden_size
SCREAMING_SNAKE_CASE:str = num_hidden_layers
SCREAMING_SNAKE_CASE:List[str] = num_attention_heads
SCREAMING_SNAKE_CASE:Optional[int] = hidden_act
SCREAMING_SNAKE_CASE:int = intermediate_size
SCREAMING_SNAKE_CASE:List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE:Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE:str = max_position_embeddings
SCREAMING_SNAKE_CASE:Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE:Optional[int] = initializer_range
SCREAMING_SNAKE_CASE:Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE:Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE:Optional[int] = use_cache
SCREAMING_SNAKE_CASE:List[Any] = classifier_dropout
class _snake_case ( _a ):
@property
def __UpperCamelCase ( self : List[str] ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE:Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE:str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 143 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ) -> Dict:
UpperCamelCase : Tuple = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=_lowerCAmelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=_lowerCAmelCase )
return parser.parse_args()
def A_ ( ) -> Optional[int]:
UpperCamelCase : Tuple = parse_args()
# Import training_script as a module.
UpperCamelCase : Union[str, Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCamelCase : List[Any] = script_fpath.stem
UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase )
# Patch sys.argv
UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 38 |
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
return int(input_a == input_a == 0 )
def A_ ( ) -> None:
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 38 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : str = 'philschmid/bart-large-cnn-samsum'
__lowerCAmelCase : List[str] = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
__lowerCAmelCase : List[Any] = 'summarizer'
__lowerCAmelCase : List[str] = AutoTokenizer
__lowerCAmelCase : int = AutoModelForSeqaSeqLM
__lowerCAmelCase : Tuple = ['text']
__lowerCAmelCase : Union[str, Any] = ['text']
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return self.pre_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , truncation=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return self.model.generate(**SCREAMING_SNAKE_CASE_)[0]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return self.pre_processor.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_)
| 12 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a_ :
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int]=1_3 , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=9_9 , __lowerCAmelCase : Any=3_2 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=3_7 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Any=5_1_2 , __lowerCAmelCase : Dict=1_6 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : str=4 , __lowerCAmelCase : List[str]=None , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
__snake_case = self.vocab_size - 1
def lowercase__ ( self : Union[str, Any] ):
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__snake_case = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , *__lowerCAmelCase : List[str] ):
__snake_case = OpenAIGPTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , *__lowerCAmelCase : str ):
__snake_case = OpenAIGPTLMHeadModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , *__lowerCAmelCase : Tuple ):
__snake_case = OpenAIGPTDoubleHeadsModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , *__lowerCAmelCase : int ):
__snake_case = self.num_labels
__snake_case = OpenAIGPTForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[str] ):
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class a_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowercase_ : List[str] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Union[str, Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : str = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : Any ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowercase__ ( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str]=False ):
__snake_case = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase , )
__snake_case = inputs_dict['labels']
__snake_case = inputs_dict['labels']
__snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCAmelCase , )
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def lowercase__ ( self : Tuple ):
__snake_case = OpenAIGPTModelTester(self )
__snake_case = ConfigTester(self , config_class=__lowerCAmelCase , n_embd=3_7 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__lowerCAmelCase )
def lowercase__ ( self : int ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCAmelCase )
def lowercase__ ( self : Any ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__lowerCAmelCase )
def lowercase__ ( self : str ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCAmelCase )
@slow
def lowercase__ ( self : str ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = OpenAIGPTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class a_ ( unittest.TestCase ):
@slow
def lowercase__ ( self : int ):
__snake_case = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(__lowerCAmelCase )
__snake_case = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=__lowerCAmelCase ) # the president is
__snake_case = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__snake_case = model.generate(__lowerCAmelCase , do_sample=__lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , __lowerCAmelCase )
| 356 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCamelCase_ = logging.getLogger(__name__)
def A ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=__UpperCAmelCase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=__UpperCAmelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=__UpperCAmelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=__UpperCAmelCase , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=__UpperCAmelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=__UpperCAmelCase , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=__UpperCAmelCase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
UpperCAmelCase_ = parser.parse_args()
return args
def A ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
def fn(__UpperCAmelCase ):
return tokenizer(examples['''text'''] )
return fn
def A ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
UpperCAmelCase_ = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
UpperCAmelCase_ = tf.train.Features(feature=__UpperCAmelCase )
UpperCAmelCase_ = tf.train.Example(features=__UpperCAmelCase )
UpperCAmelCase_ = example.SerializeToString()
records.append(__UpperCAmelCase )
return records
def A ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
UpperCAmelCase_ = min(len(__UpperCAmelCase ) , args.limit )
UpperCAmelCase_ = dataset.select(range(__UpperCAmelCase ) )
print(f"Limiting the dataset to {args.limit} entries." )
UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase_ = os.path.join(args.output_dir , args.split )
if not os.path.exists(__UpperCAmelCase ):
os.makedirs(__UpperCAmelCase )
else:
UpperCAmelCase_ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
UpperCAmelCase_ = tokenize_function(__UpperCAmelCase )
UpperCAmelCase_ = dataset.map(__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__UpperCAmelCase ):
# Concatenate all texts.
UpperCAmelCase_ = {k: sum(examples[k] , [] ) for k in examples.keys()}
UpperCAmelCase_ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCAmelCase_ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCAmelCase_ = {
k: [t[i : i + args.max_length] for i in range(0 , __UpperCAmelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
UpperCAmelCase_ = dataset_tokenized.map(__UpperCAmelCase , batched=__UpperCAmelCase , batch_size=1000 , num_proc=4 )
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for shard in range(0 , len(__UpperCAmelCase ) , args.shard_size ):
UpperCAmelCase_ = grouped_dataset[shard : shard + args.shard_size]
UpperCAmelCase_ = len(dataset_snapshot['''input_ids'''] )
UpperCAmelCase_ = os.path.join(__UpperCAmelCase , f"dataset-{shard_count}-{records_containing}.tfrecord" )
UpperCAmelCase_ = get_serialized_examples(__UpperCAmelCase )
with tf.io.TFRecordWriter(__UpperCAmelCase ) as out_file:
for i in range(len(__UpperCAmelCase ) ):
UpperCAmelCase_ = serialized_examples[i]
out_file.write(__UpperCAmelCase )
print('''Wrote file {} containing {} records'''.format(__UpperCAmelCase , __UpperCAmelCase ) )
shard_count += 1
total_records += records_containing
with open(f"split-{args.split}-records-count.txt" , '''w''' ) as f:
print(f"Total {args.split} records: {total_records}" , file=__UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
main(args)
| 561 |
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(__UpperCAmelCase , x % y )
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(__UpperCAmelCase , __UpperCAmelCase )
def A ( __UpperCAmelCase = 20 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(__UpperCAmelCase , __UpperCAmelCase )
return g
if __name__ == "__main__":
print(f"{solution() = }")
| 561 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowercase__ :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : List[str] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : List[str] = num_labels
_lowerCamelCase : Tuple = num_choices
_lowerCamelCase : Optional[int] = scope
_lowerCamelCase : Any = self.vocab_size - 1
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
_lowerCamelCase : Optional[Any] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
_lowerCamelCase : str = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_lowerCamelCase : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase : List[Any] = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> Any:
_lowerCamelCase : Optional[Any] = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : Any = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> Union[str, Any]:
_lowerCamelCase : Any = self.num_labels
_lowerCamelCase : List[Any] = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase_ ( self) -> Any:
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Any = config_and_inputs
_lowerCamelCase : Any = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class lowercase__ ( A_ ,A_ ,A_ ,unittest.TestCase ):
__UpperCAmelCase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__UpperCAmelCase = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> Union[str, Any]:
_lowerCamelCase : List[str] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE)
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_lowerCamelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE , )
_lowerCamelCase : Optional[int] = inputs_dict["""labels"""]
_lowerCamelCase : Dict = inputs_dict["""labels"""]
_lowerCamelCase : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE , )
_lowerCamelCase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE)
return inputs_dict
def UpperCamelCase_ ( self) -> List[Any]:
_lowerCamelCase : Optional[int] = OpenAIGPTModelTester(self)
_lowerCamelCase : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37)
def UpperCamelCase_ ( self) -> str:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE)
@slow
def UpperCamelCase_ ( self) -> str:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : List[Any] = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE)
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
@require_torch
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""")
model.to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE) # the president is
_lowerCamelCase : Union[str, Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_lowerCamelCase : Optional[Any] = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE)
self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE)
| 88 |
"""simple docstring"""
from math import factorial
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = real
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase = [1] * rank
else:
_lowerCAmelCase = rank
def __repr__( self : int ) -> List[Any]:
"""simple docstring"""
return (
F"""{self.real}+"""
F"""{'+'.join(str(UpperCAmelCase_ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def __lowerCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
_lowerCAmelCase = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , UpperCAmelCase_ )
def __add__( self : List[Any] , UpperCAmelCase_ : List[Any] ) -> str:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return Dual(self.real + other , self.duals )
_lowerCAmelCase = self.duals.copy()
_lowerCAmelCase = other.duals.copy()
if len(UpperCAmelCase_ ) > len(UpperCAmelCase_ ):
o_dual.extend([1] * (len(UpperCAmelCase_ ) - len(UpperCAmelCase_ )) )
elif len(UpperCAmelCase_ ) < len(UpperCAmelCase_ ):
s_dual.extend([1] * (len(UpperCAmelCase_ ) - len(UpperCAmelCase_ )) )
_lowerCAmelCase = []
for i in range(len(UpperCAmelCase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_: str = __add__
def __sub__( self : Dict , UpperCAmelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
return self + other * -1
def __mul__( self : List[Any] , UpperCAmelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , UpperCAmelCase_ )
_lowerCAmelCase = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_: int = __mul__
def __truediv__( self : str , UpperCAmelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , UpperCAmelCase_ )
raise ValueError
def __floordiv__( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , UpperCAmelCase_ )
raise ValueError
def __pow__( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
if n < 0 or isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
_lowerCAmelCase = self
for _ in range(n - 1 ):
x *= self
return x
def __snake_case ( SCREAMING_SNAKE_CASE: Union[str, Any] , SCREAMING_SNAKE_CASE: List[str] , SCREAMING_SNAKE_CASE: Tuple ):
"""simple docstring"""
if not callable(SCREAMING_SNAKE_CASE ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(SCREAMING_SNAKE_CASE , (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError('differentiate() requires an int as input for order' )
_lowerCAmelCase = Dual(SCREAMING_SNAKE_CASE , 1 )
_lowerCAmelCase = func(SCREAMING_SNAKE_CASE )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
def __snake_case ( SCREAMING_SNAKE_CASE: Tuple ):
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
| 580 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
_UpperCAmelCase = 5
_UpperCAmelCase = 10
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowercase_ ,unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = SpeechaTextTokenizer
_UpperCamelCase = False
_UpperCamelCase = True
def _UpperCAmelCase ( self ):
super().setUp()
_lowerCamelCase = sp.SentencePieceProcessor()
spm_model.Load(a__ )
_lowerCamelCase = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(a__ ) )]
_lowerCamelCase = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCamelCase = Path(self.tmpdirname )
save_json(a__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(a__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
_lowerCamelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self ):
_lowerCamelCase = '''<pad>'''
_lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(a__ ) , 10_01 )
def _UpperCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def _UpperCAmelCase ( self ):
_lowerCamelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [2_89, 50, 14, 1_74, 3_86] , )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(a__ , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def _UpperCAmelCase ( self ):
# fmt: off
_lowerCamelCase = {'''input_ids''': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = "valhalla/s2t_mustc_multilinguial_medium"
_UpperCamelCase = "C'est trop cool"
_UpperCamelCase = "Esto es genial"
@classmethod
def _UpperCAmelCase ( cls ):
_lowerCamelCase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def _UpperCAmelCase ( self ):
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11 )
def _UpperCAmelCase ( self ):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def _UpperCAmelCase ( self ):
self.assertIn(a__ , self.tokenizer.all_special_ids )
_lowerCamelCase = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_lowerCamelCase = self.tokenizer.decode(a__ , skip_special_tokens=a__ )
_lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a__ )
self.assertEqual(a__ , a__ )
self.assertNotIn(self.tokenizer.eos_token , a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = '''fr'''
_lowerCamelCase = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , a__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def _UpperCAmelCase ( self ):
_lowerCamelCase = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
_lowerCamelCase = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 706 |
from __future__ import annotations
def _lowerCamelCase ( _a , _a , _a ):
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297 | 0 |
_snake_case = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_snake_case = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_snake_case = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str:
assert len(str(_lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
UpperCamelCase = year // 100
UpperCamelCase = (5 * (century % 4) + 2) % 7
UpperCamelCase = year % 100
UpperCamelCase = centurian % 12
UpperCamelCase = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCamelCase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCamelCase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def __lowerCamelCase ( _lowercase ) -> str:
UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
UpperCamelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
UpperCamelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
UpperCamelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(_lowercase )-1}' )
if "norm" in key:
UpperCamelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
UpperCamelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(_lowercase )-1}' )
if "layer_norm1" in key:
UpperCamelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
UpperCamelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase = key[key.find('block' ) + len('block' )]
UpperCamelCase = key.replace(F'block{idx}' , F'block.{int(_lowercase )-1}' )
if "attn.q" in key:
UpperCamelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
UpperCamelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
UpperCamelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
UpperCamelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
UpperCamelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
UpperCamelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
UpperCamelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
UpperCamelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase = key[key.find('linear_c' ) + len('linear_c' )]
UpperCamelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(_lowercase )-1}' )
if "bot_conv" in key:
UpperCamelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
UpperCamelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
UpperCamelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
UpperCamelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
UpperCamelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
UpperCamelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
UpperCamelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
UpperCamelCase = key.replace('module.last_layer_depth' , 'head.head' )
UpperCamelCase = value
return new_state_dict
def __lowerCamelCase ( _lowercase , _lowercase ) -> Union[str, Any]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
UpperCamelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
UpperCamelCase = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase = kv_bias[config.hidden_sizes[i] :]
def __lowerCamelCase ( ) -> Any:
UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return image
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=False , _lowercase=None ) -> Tuple:
UpperCamelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCamelCase = GLPNImageProcessor()
# prepare image
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=_lowercase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
UpperCamelCase = torch.load(_lowercase , map_location=torch.device('cpu' ) )
# rename keys
UpperCamelCase = rename_keys(_lowercase )
# key and value matrices need special treatment
read_in_k_v(_lowercase , _lowercase )
# create HuggingFace model and load state dict
UpperCamelCase = GLPNForDepthEstimation(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# forward pass
UpperCamelCase = model(_lowercase )
UpperCamelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCamelCase = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCamelCase = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
UpperCamelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _lowercase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(_lowercase , _lowercase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_lowercase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowercase , _lowercase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_lowercase , )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
_snake_case = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 282 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _A ( unittest.TestCase ):
def __a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase : Optional[int] = tempfile.mkdtemp()
lowercase : List[Any] = SamImageProcessor()
lowercase : Any = SamProcessor(_A )
processor.save_pretrained(self.tmpdirname )
def __a ( self : Tuple , **_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase : int = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase : int = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
lowercase : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.get_image_processor()
lowercase : Tuple = SamProcessor(image_processor=_A )
lowercase : Tuple = self.prepare_image_inputs()
lowercase : Any = image_processor(_A , return_tensors='''np''' )
lowercase : List[str] = processor(images=_A , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def __a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : Any = self.get_image_processor()
lowercase : str = SamProcessor(image_processor=_A )
lowercase : Any = [torch.ones((1, 3, 5, 5) )]
lowercase : Union[str, Any] = [[1_764, 2_646]]
lowercase : Union[str, Any] = [[683, 1_024]]
lowercase : Any = processor.post_process_masks(_A , _A , _A )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
lowercase : Optional[int] = processor.post_process_masks(
_A , torch.tensor(_A ) , torch.tensor(_A ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
lowercase : List[Any] = [np.ones((1, 3, 5, 5) )]
lowercase : Optional[Any] = processor.post_process_masks(_A , np.array(_A ) , np.array(_A ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
lowercase : Optional[int] = [[1, 0], [0, 1]]
with self.assertRaises(_A ):
lowercase : Tuple = processor.post_process_masks(_A , np.array(_A ) , np.array(_A ) )
@require_vision
@require_tf
class _A ( unittest.TestCase ):
def __a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = tempfile.mkdtemp()
lowercase : Union[str, Any] = SamImageProcessor()
lowercase : Optional[int] = SamProcessor(_A )
processor.save_pretrained(self.tmpdirname )
def __a ( self : List[Any] , **_A : Optional[Any] ) -> Dict:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def __a ( self : int ) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __a ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase : Optional[Any] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self : Tuple ) -> int:
"""simple docstring"""
lowercase : Dict = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase : int = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
lowercase : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Optional[int] = self.get_image_processor()
lowercase : Any = SamProcessor(image_processor=_A )
lowercase : Dict = self.prepare_image_inputs()
lowercase : List[str] = image_processor(_A , return_tensors='''np''' )
lowercase : Optional[Any] = processor(images=_A , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def __a ( self : str ) -> Tuple:
"""simple docstring"""
lowercase : List[str] = self.get_image_processor()
lowercase : Tuple = SamProcessor(image_processor=_A )
lowercase : int = [tf.ones((1, 3, 5, 5) )]
lowercase : int = [[1_764, 2_646]]
lowercase : Union[str, Any] = [[683, 1_024]]
lowercase : int = processor.post_process_masks(_A , _A , _A , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
lowercase : int = processor.post_process_masks(
_A , tf.convert_to_tensor(_A ) , tf.convert_to_tensor(_A ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
lowercase : Dict = [np.ones((1, 3, 5, 5) )]
lowercase : Any = processor.post_process_masks(
_A , np.array(_A ) , np.array(_A ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
lowercase : Any = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowercase : Optional[Any] = processor.post_process_masks(
_A , np.array(_A ) , np.array(_A ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class _A ( unittest.TestCase ):
def __a ( self : Tuple ) -> str:
"""simple docstring"""
lowercase : Union[str, Any] = tempfile.mkdtemp()
lowercase : Optional[Any] = SamImageProcessor()
lowercase : Optional[Any] = SamProcessor(_A )
processor.save_pretrained(self.tmpdirname )
def __a ( self : List[Any] , **_A : List[Any] ) -> str:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def __a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase : Tuple = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase : Optional[Any] = self.get_image_processor()
lowercase : List[str] = SamProcessor(image_processor=_A )
lowercase : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowercase : Dict = [tf.convert_to_tensor(_A )]
lowercase : int = [torch.tensor(_A )]
lowercase : List[Any] = [[1_764, 2_646]]
lowercase : Tuple = [[683, 1_024]]
lowercase : Optional[int] = processor.post_process_masks(
_A , _A , _A , return_tensors='''tf''' )
lowercase : List[str] = processor.post_process_masks(
_A , _A , _A , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase : Tuple = self.get_image_processor()
lowercase : Optional[int] = SamProcessor(image_processor=_A )
lowercase : Dict = self.prepare_image_inputs()
lowercase : Optional[Any] = image_processor(_A , return_tensors='''pt''' )['''pixel_values'''].numpy()
lowercase : Any = processor(images=_A , return_tensors='''pt''' )['''pixel_values'''].numpy()
lowercase : List[Any] = image_processor(_A , return_tensors='''tf''' )['''pixel_values'''].numpy()
lowercase : Tuple = processor(images=_A , return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(_A , _A ) )
self.assertTrue(np.allclose(_A , _A ) )
self.assertTrue(np.allclose(_A , _A ) )
| 701 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _A ( unittest.TestCase ):
def __a ( self : Any ) -> str:
"""simple docstring"""
lowercase : Dict = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase : Optional[int] = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(_A ) , torch_builtin(_A ) ) )
self.assertFalse(torch.allclose(gelu_python(_A ) , gelu_new(_A ) ) )
def __a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase : str = get_activation('''gelu''' )
lowercase : str = get_activation('''gelu_10''' )
lowercase : Dict = torch_builtin(_A )
lowercase : List[Any] = geluaa(_A )
lowercase : List[str] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(_A ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def __a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(_A ):
get_activation('''bogus''' )
with self.assertRaises(_A ):
get_activation(_A )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : Optional[Any] = get_activation('''gelu''' )
lowercase : Any = 1
lowercase : Tuple = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(_A ):
lowercase : str = acta.a | 596 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """CLIPImageProcessor"""
_lowercase = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self: List[str],A_: int=None,A_: str=None,**A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: Tuple,A_: Any=None,A_: str=None,A_: List[Any]=None,**A_: Tuple ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__UpperCamelCase = self.tokenizer(A_,return_tensors=A_,**A_ )
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: int,*A_: Dict,**A_: int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Union[str, Any],**A_: str ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.tokenizer.model_input_names
__UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : int=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Dict=37 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Union[str, Any]=1_28 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : int=4 , _UpperCAmelCase : List[Any]=None , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = self.prepare_config_and_inputs()
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , ):
"""simple docstring"""
UpperCAmelCase__ = True
UpperCAmelCase__ = NezhaModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = NezhaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = NezhaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = NezhaForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = config_and_inputs
UpperCAmelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Any = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Tuple = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Union[str, Any] = True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int]=False ):
"""simple docstring"""
UpperCAmelCase__ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
UpperCAmelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
UpperCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = NezhaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase__ = None
self.model_tester.create_and_check_model_as_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = NezhaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(config=_UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = torch.jit.trace(
_UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """bert.pt""" ) )
UpperCAmelCase__ = torch.jit.load(os.path.join(_UpperCAmelCase , """bert.pt""" ) , map_location=_UpperCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(_UpperCAmelCase ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
UpperCAmelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
UpperCAmelCase__ = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase__ = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
UpperCAmelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
UpperCAmelCase__ = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase__ = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
| 603 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
snake_case = """gpt_neox"""
def __init__( self , UpperCAmelCase_=5_04_32 , UpperCAmelCase_=61_44 , UpperCAmelCase_=44 , UpperCAmelCase_=64 , UpperCAmelCase_=2_45_76 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.25 , UpperCAmelCase_=1_00_00 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20_48 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1e-5 , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=2 , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = rotary_pct
snake_case_ = rotary_emb_base
snake_case_ = attention_dropout
snake_case_ = hidden_dropout
snake_case_ = classifier_dropout
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = tie_word_embeddings
snake_case_ = use_parallel_residual
snake_case_ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def _lowercase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCAmelCase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'''got {self.rope_scaling}''' )
snake_case_ = self.rope_scaling.get("type" , UpperCAmelCase_ )
snake_case_ = self.rope_scaling.get("factor" , UpperCAmelCase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 703 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
snake_case = """philschmid/bart-large-cnn-samsum"""
snake_case = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
snake_case = """summarizer"""
snake_case = AutoTokenizer
snake_case = AutoModelForSeqaSeqLM
snake_case = ["""text"""]
snake_case = ["""text"""]
def _lowercase ( self , UpperCAmelCase_ ):
return self.pre_processor(UpperCAmelCase_ , return_tensors="pt" , truncation=UpperCAmelCase_ )
def _lowercase ( self , UpperCAmelCase_ ):
return self.model.generate(**UpperCAmelCase_ )[0]
def _lowercase ( self , UpperCAmelCase_ ):
return self.pre_processor.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
| 420 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 230 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A ( A_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(
features=lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase , streaming=lowerCAmelCase , num_proc=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= Generator(
cache_dir=lowerCAmelCase , features=lowerCAmelCase , generator=lowerCAmelCase , gen_kwargs=lowerCAmelCase , **lowerCAmelCase , )
def _A (self ):
# Build iterable dataset
if self.streaming:
__lowercase= self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
__lowercase= None
__lowercase= None
__lowercase= None
__lowercase= None
self.builder.download_and_prepare(
download_config=lowerCAmelCase , download_mode=lowerCAmelCase , verification_mode=lowerCAmelCase , base_path=lowerCAmelCase , num_proc=self.num_proc , )
__lowercase= self.builder.as_dataset(
split='train' , verification_mode=lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 230 | 1 |
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Optional[Any] = int(UpperCAmelCase )
# Initialize Result
lowercase__ : str = []
# Traverse through all denomination
for denomination in reversed(UpperCAmelCase ):
# Find denominations
while int(UpperCAmelCase ) >= int(UpperCAmelCase ):
total_value -= int(UpperCAmelCase )
answer.append(UpperCAmelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__a: List[str] = []
__a: Optional[Any] = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a: Optional[Any] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
__a: Tuple = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a: str = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
__a: Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
__a: List[Any] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 709 | '''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
_enforce_args(UpperCAmelCase , UpperCAmelCase )
if n == 0:
return 0
lowercase__ : Optional[int] = float('''-inf''' )
for i in range(1 , n + 1 ):
lowercase__ : str = max(
UpperCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , UpperCAmelCase ) )
return max_revue
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
_enforce_args(UpperCAmelCase , UpperCAmelCase )
lowercase__ : List[str] = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowercase__ : Union[str, Any] = float('''-inf''' )
for i in range(1 , n + 1 ):
lowercase__ : Tuple = max(
UpperCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , UpperCAmelCase , UpperCAmelCase ) , )
lowercase__ : str = max_revenue
return max_rev[n]
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
_enforce_args(UpperCAmelCase , UpperCAmelCase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
lowercase__ : Optional[int] = [float('''-inf''' ) for _ in range(n + 1 )]
lowercase__ : Any = 0
for i in range(1 , n + 1 ):
lowercase__ : Optional[Any] = max_rev[i]
for j in range(1 , i + 1 ):
lowercase__ : Optional[Any] = max(UpperCAmelCase , prices[j - 1] + max_rev[i - j] )
lowercase__ : Tuple = max_revenue_i
return max_rev[n]
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
if n < 0:
lowercase__ : str = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(UpperCAmelCase )
if n > len(UpperCAmelCase ):
lowercase__ : Dict = (
'''Each integral piece of rod must have a corresponding price. '''
F"""Got n = {n} but length of prices = {len(UpperCAmelCase )}"""
)
raise ValueError(UpperCAmelCase )
def __UpperCamelCase ( ):
lowercase__ : int = [6, 10, 12, 15, 20, 23]
lowercase__ : Tuple = len(UpperCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowercase__ : Optional[Any] = 36
lowercase__ : str = top_down_cut_rod(UpperCAmelCase , UpperCAmelCase )
lowercase__ : Optional[int] = bottom_up_cut_rod(UpperCAmelCase , UpperCAmelCase )
lowercase__ : List[str] = naive_cut_rod_recursive(UpperCAmelCase , UpperCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 428 | 0 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : str=False ) -> Union[str, Any]:
_lowerCAmelCase : Union[str, Any] = OmegaConf.load(snake_case__ )
if display:
print(yaml.dump(OmegaConf.to_container(snake_case__ ) ) )
return config
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Any=None ) -> Tuple:
if conf_path is None:
_lowerCAmelCase : List[str] = """./model_checkpoints/vqgan_only.yaml"""
_lowerCAmelCase : Any = load_config(snake_case__ , display=snake_case__ )
_lowerCAmelCase : Any = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : List[str] = """./model_checkpoints/vqgan_only.pt"""
_lowerCAmelCase : str = torch.load(snake_case__ , map_location=snake_case__ )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : Optional[Any] = sd["""state_dict"""]
model.load_state_dict(snake_case__ , strict=snake_case__ )
model.to(snake_case__ )
del sd
return model
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = model.encode(snake_case__ )
print(f'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
_lowerCAmelCase : Tuple = model.decode(snake_case__ )
return xrec
def _UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int]=False ) -> str:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = string.rsplit(""".""" , 1 )
if reload:
_lowerCAmelCase : str = importlib.import_module(snake_case__ )
importlib.reload(snake_case__ )
return getattr(importlib.import_module(snake_case__ , package=snake_case__ ) , cls )
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] ) -> List[Any]:
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : Union[str, Any]=True ) -> Union[str, Any]:
_lowerCAmelCase : List[Any] = instantiate_from_config(snake_case__ )
if sd is not None:
model.load_state_dict(snake_case__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Tuple ) -> Any:
# load the specified checkpoint
if ckpt:
_lowerCAmelCase : Dict = torch.load(snake_case__ , map_location="""cpu""" )
_lowerCAmelCase : List[str] = pl_sd["""global_step"""]
print(f'loaded model from global step {global_step}.' )
else:
_lowerCAmelCase : List[str] = {"""state_dict""": None}
_lowerCAmelCase : int = None
_lowerCAmelCase : Any = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=snake_case__ , eval_mode=snake_case__ )["""model"""]
return model, global_step
| 384 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a__ ( snake_case__ ) -> Dict[str, torch.Tensor]:
lowerCamelCase = []
lowerCamelCase = []
lowerCamelCase = []
for rt in rc.restypes:
lowerCamelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowerCamelCase = {name: i for i, name in enumerate(snake_case__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.floataa , device=protein["""aatype"""].device , )
lowerCamelCase = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowerCamelCase = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase = restype_atomaa_mask[protein_aatype]
lowerCamelCase = residx_atomaa_mask
lowerCamelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowerCamelCase = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowerCamelCase = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowerCamelCase = rc.restype_atoa[restype_letter]
lowerCamelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowerCamelCase = rc.atom_order[atom_name]
lowerCamelCase = 1
lowerCamelCase = restype_atomaa_mask[protein_aatype]
lowerCamelCase = residx_atomaa_mask
return protein
def a__ ( snake_case__ ) -> Dict[str, np.ndarray]:
lowerCamelCase = tree_map(lambda snake_case__ : torch.tensor(snake_case__ , device=batch["""aatype"""].device ) , snake_case__ , np.ndarray )
lowerCamelCase = tensor_tree_map(lambda snake_case__ : np.array(snake_case__ ) , make_atomaa_masks(snake_case__ ) )
return out
| 543 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : bool , _UpperCamelCase : list[int] , _UpperCamelCase : float ):
'''simple docstring'''
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(_UpperCamelCase ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = [90, 23, 6, 33, 21, 65, 123, 3_4423]
UpperCAmelCase_ = math.log(len(_UpperCamelCase ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 43 | '''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : str ) ->List[str]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_euler''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : List[str] ) ->int:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_euler''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=UpperCAmelCase__ , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 43 | 1 |
from math import pow, sqrt
def a_ ( *_A ) -> List[str]:
"""simple docstring"""
snake_case__ = len(_SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def a_ ( _A , _A ) -> str:
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def a_ ( _A , _A , _A ) -> Optional[Any]:
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def a_ ( _A , _A , _A ) -> Union[str, Any]:
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def a_ ( _A , _A , _A ) -> Tuple:
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def a_ ( _A , _A , _A ) -> Any:
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
| 328 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class a__ :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : str , ) -> Optional[int]:
__A= parent
__A= 13
__A= 7
__A= True
__A= True
__A= False
__A= True
__A= 99
__A= 32
__A= 2
__A= 4
__A= 37
__A= 'gelu'
__A= 0.1
__A= 0.1
__A= 512
__A= 16
__A= 2
__A= 0.02
__A= 3
__A= 4
__A= None
def lowerCAmelCase ( self : Optional[Any] ) -> str:
__A= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A= None
if self.use_input_mask:
__A= random_attention_mask([self.batch_size, self.seq_length] )
__A= None
__A= None
__A= None
if self.use_labels:
__A= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A= ids_tensor([self.batch_size] , self.num_choices )
__A= DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] ) -> Any:
__A= TFDistilBertModel(config=lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
__A= [input_ids, input_mask]
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__A= TFDistilBertForMaskedLM(config=lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> int:
__A= TFDistilBertForQuestionAnswering(config=lowerCAmelCase_ )
__A= {
'input_ids': input_ids,
'attention_mask': input_mask,
}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
__A= self.num_labels
__A= TFDistilBertForSequenceClassification(lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int ) -> Optional[Any]:
__A= self.num_choices
__A= TFDistilBertForMultipleChoice(lowerCAmelCase_ )
__A= tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
__A= tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
__A= {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
__A= self.num_labels
__A= TFDistilBertForTokenClassification(lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
__A= self.prepare_config_and_inputs()
((__A), (__A), (__A), (__A), (__A), (__A))= config_and_inputs
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a__ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
A : Optional[int] = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A : str = False
A : List[Any] = False
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__A= TFDistilBertModelTester(self )
__A= ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def lowerCAmelCase ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase_ )
def lowerCAmelCase ( self : str ) -> Any:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : int ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__A= TFDistilBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Any ) -> List[Any]:
__A= TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
__A= tf.constant([[0, 1, 2, 3, 4, 5]] )
__A= model(lowerCAmelCase_ )[0]
__A= [1, 6, 768]
self.assertEqual(output.shape , lowerCAmelCase_ )
__A= tf.constant(
[
[
[0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99],
[0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04],
[0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
| 186 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = '''hf-internal-testing/tiny-random-t5'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer('''This is me''' , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
SCREAMING_SNAKE_CASE__ = model.generate(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
SCREAMING_SNAKE_CASE__ = model_reloaded.generate(**__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) )
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = '''hf-internal-testing/tiny-random-t5'''
SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCamelCase ):
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model.reverse_bettertransformer()
model.save_pretrained(__lowerCamelCase )
| 709 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return "".join(sorted(_A ) )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return word_by_signature[signature(_A )]
_SCREAMING_SNAKE_CASE : str = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
_SCREAMING_SNAKE_CASE : str = sorted({word.strip().lower() for word in data.splitlines()})
_SCREAMING_SNAKE_CASE : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 472 | 0 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
A_ : int = logging.get_logger(__name__)
class lowerCamelCase (A__ ):
def __init__( self : List[Any] , **__UpperCAmelCase : int ) -> Dict:
requires_backends(self , ["""bs4"""] )
super().__init__(**__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
SCREAMING_SNAKE_CASE__ = parent.find_all(child.name , recursive=__UpperCAmelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__UpperCAmelCase ) else next(i for i, s in enumerate(__UpperCAmelCase , 1 ) if s is child ) )
SCREAMING_SNAKE_CASE__ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = BeautifulSoup(__UpperCAmelCase , """html.parser""" )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for element in html_code.descendants:
if type(__UpperCAmelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
SCREAMING_SNAKE_CASE__ = html.unescape(__UpperCAmelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.xpath_soup(__UpperCAmelCase )
stringaxtag_seq.append(__UpperCAmelCase )
stringaxsubs_seq.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = """"""
for tagname, subs in zip(__UpperCAmelCase , __UpperCAmelCase ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self : Tuple , __UpperCAmelCase : Optional[Any] ) -> BatchFeature:
SCREAMING_SNAKE_CASE__ = False
# Check that strings has a valid type
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = True
elif isinstance(__UpperCAmelCase , (list, tuple) ):
if len(__UpperCAmelCase ) == 0 or isinstance(html_strings[0] , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
F"""but is of type {type(__UpperCAmelCase )}.""" )
SCREAMING_SNAKE_CASE__ = bool(isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(html_strings[0] , __UpperCAmelCase )) )
if not is_batched:
SCREAMING_SNAKE_CASE__ = [html_strings]
# Get nodes + xpaths
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for html_string in html_strings:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_three_from_single(__UpperCAmelCase )
nodes.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = []
for node, tag_list, sub_list in zip(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = self.construct_xpath(__UpperCAmelCase , __UpperCAmelCase )
xpath_strings.append(__UpperCAmelCase )
xpaths.append(__UpperCAmelCase )
# return as Dict
SCREAMING_SNAKE_CASE__ = {"""nodes""": nodes, """xpaths""": xpaths}
SCREAMING_SNAKE_CASE__ = BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
return encoded_inputs
| 196 |
"""simple docstring"""
A_ : Any = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 196 | 1 |
import pprint
import requests
_UpperCamelCase = "https://zenquotes.io/api"
def _lowercase ( ):
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def _lowercase ( ):
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
_UpperCamelCase = random_quotes()
pprint.pprint(response)
| 583 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCamelCase = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["DPTFeatureExtractor"]
_UpperCamelCase = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 583 | 1 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Tuple = logging.get_logger(__name__)
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[Any] ) -> Optional[Any]:
"""simple docstring"""
__a = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
__a = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$', SCREAMING_SNAKE_CASE__ )
if matches:
__a = float(matches[1] )
__a = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__a = 1001
__a = 'imagenet-1k-id2label.json'
__a = 'huggingface/label-files'
__a = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, repo_type='dataset' ), 'r' ) )
__a = {int(SCREAMING_SNAKE_CASE__ ) + 1: v for k, v in idalabel.items()}
__a = 'background'
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
__a = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__a = Image.open(requests.get(SCREAMING_SNAKE_CASE__, stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int, SCREAMING_SNAKE_CASE__: Union[str, Any], SCREAMING_SNAKE_CASE__: Optional[int], SCREAMING_SNAKE_CASE__: str=False ) -> int:
"""simple docstring"""
__a = get_mobilenet_va_config(SCREAMING_SNAKE_CASE__ )
# Load 🤗 model
__a = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__a = MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size}, size={'shortest_edge': config.image_size + 32}, )
__a = image_processor(images=prepare_img(), return_tensors='pt' )
__a = model(**SCREAMING_SNAKE_CASE__ )
__a = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__a = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__a = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__a = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3], SCREAMING_SNAKE_CASE__, atol=1e-4 )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('Pushing to the hub...' )
__a = 'google/' + model_name
image_processor.push_to_hub(SCREAMING_SNAKE_CASE__ )
model.push_to_hub(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : Any = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 448 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
__a =["pixel_values"]
def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) ->None:
'''simple docstring'''
__a = do_resize
__a = do_rescale
__a = size_divisor
__a = resample
super().__init__(**lowerCamelCase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) ->np.ndarray:
'''simple docstring'''
__a , __a = get_image_size(lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__a = height // size_divisor * size_divisor
__a = width // size_divisor * size_divisor
__a = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
return image
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) ->np.ndarray:
'''simple docstring'''
return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) ->BatchFeature:
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = size_divisor if size_divisor is not None else self.size_divisor
__a = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for img in images]
if do_resize:
__a = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase ) | 448 | 1 |
from datetime import datetime
import requests
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bytes:
lowercase__ = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
lowercase__ = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(_SCREAMING_SNAKE_CASE ).content
if __name__ == "__main__":
lowercase_ = input("""Enter Video/IGTV url: """).strip()
lowercase_ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 718 |
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]:
"""simple docstring"""
lowercase__ = spearmanr(a , a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 45 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : Tuple ={'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] =[
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 148 |
import logging
import os
from .state import PartialState
class UpperCAmelCase_ ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def _A ( _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _A ( self , _A , _A , *_A , **_A ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
__SCREAMING_SNAKE_CASE = kwargs.pop('main_process_only' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('in_order' , _A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
elif in_order:
__SCREAMING_SNAKE_CASE = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
state.wait_for_everyone()
def __lowercase ( a__ , a__ = None ) -> Optional[Any]:
if log_level is None:
__SCREAMING_SNAKE_CASE = os.environ.get('ACCELERATE_LOG_LEVEL' , a__ )
__SCREAMING_SNAKE_CASE = logging.getLogger(a__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(a__ , {} )
| 148 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : Union[str, Any]=400 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Dict=[0.5, 0.5, 0.5] , UpperCAmelCase_ : int=[0.5, 0.5, 0.5] , ) ->int:
"""simple docstring"""
snake_case_ = size if size is not None else {"""shortest_edge""": 30}
snake_case_ = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize_and_center_crop
snake_case_ = size
snake_case_ = crop_pct
snake_case_ = crop_size
snake_case_ = do_normalize
snake_case_ = image_mean
snake_case_ = image_std
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = PoolFormerImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = PoolFormerImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """crop_pct""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 705 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = KandinskyVaaControlnetPipeline
__lowercase: str = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase: Tuple = False
@property
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return 100
@property
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case_ = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=0 ) ->List[str]:
"""simple docstring"""
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
# create hint
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = """cpu"""
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case_ = torch.from_numpy(np.array(UpperCAmelCase_ ) ).float() / 255.0
snake_case_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case_ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
snake_case_ = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = """A robot, 4k photo"""
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ , snake_case_ = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ = pipeline(
image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , hint=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , output_type="""np""" , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 2 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : list ) -> list:
'''simple docstring'''
UpperCAmelCase_ = len(snake_case_ )
for _ in range(snake_case_ ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
UpperCAmelCase_ , UpperCAmelCase_ = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =list(range(10, 0, -1))
print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 78 |
def UpperCamelCase_ ( __a = 50 ) -> int:
a__ : Tuple = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37 | 0 |
'''simple docstring'''
def _a ( lowerCamelCase_ ):
snake_case : int =[]
if len(lowerCamelCase_ ) == 1:
return [nums.copy()]
for _ in range(len(lowerCamelCase_ ) ):
snake_case : str =nums.pop(0 )
snake_case : Optional[int] =permute(lowerCamelCase_ )
for perm in permutations:
perm.append(lowerCamelCase_ )
result.extend(lowerCamelCase_ )
nums.append(lowerCamelCase_ )
return result
def _a ( lowerCamelCase_ ):
def backtrack(lowerCamelCase_ ):
if start == len(lowerCamelCase_ ) - 1:
output.append(nums[:] )
else:
for i in range(lowerCamelCase_ , len(lowerCamelCase_ ) ):
snake_case : Dict =nums[i], nums[start]
backtrack(start + 1 )
snake_case : Optional[Any] =nums[i], nums[start] # backtrack
snake_case : Tuple =[]
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
A : List[Any] = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 708 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
A : str = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def _a ( ):
snake_case : str =os.path.dirname(os.path.realpath(lowerCamelCase_ ) )
snake_case : Optional[Any] =os.path.join(lowerCamelCase_ , '''words.txt''' )
snake_case : List[Any] =''''''
with open(lowerCamelCase_ ) as f:
snake_case : str =f.readline()
snake_case : Union[str, Any] =[word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
snake_case : str =[
word
for word in [sum(ord(lowerCamelCase_ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowerCamelCase_ )
if __name__ == "__main__":
print(solution())
| 136 | 0 |
"""simple docstring"""
from math import sqrt
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
__lowerCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
__lowerCAmelCase = False
for divisor in range(2 , int(round(sqrt(_UpperCamelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowerCAmelCase = False
break
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'status' must been from type bool"
return status
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowerCAmelCase = list(range(2 , n + 1 ) )
__lowerCAmelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(_UpperCamelCase ) ):
for j in range(i + 1 , len(_UpperCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowerCAmelCase = 0
# filters actual prime numbers.
__lowerCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list"
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
__lowerCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_UpperCamelCase ):
ans.append(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list"
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and number >= 0, "'number' must been an int and >= 0"
__lowerCAmelCase = [] # this list will be returns of the function.
# potential prime number factors.
__lowerCAmelCase = 2
__lowerCAmelCase = number
if number == 0 or number == 1:
ans.append(_UpperCamelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_UpperCamelCase ):
while quotient != 1:
if is_prime(_UpperCamelCase ) and (quotient % factor == 0):
ans.append(_UpperCamelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list"
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(_UpperCamelCase )
__lowerCAmelCase = max(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type int"
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(_UpperCamelCase )
__lowerCAmelCase = min(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type int"
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , _UpperCamelCase ), "compare bust been from type bool"
return number % 2 == 0
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _UpperCamelCase ), "compare bust been from type bool"
return number % 2 != 0
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert (
isinstance(_UpperCamelCase , _UpperCamelCase ) and (number > 2) and is_even(_UpperCamelCase )
), "'number' must been an int, even and > 2"
__lowerCAmelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCAmelCase = get_prime_numbers(_UpperCamelCase )
__lowerCAmelCase = len(_UpperCamelCase )
# run variable for while-loops.
__lowerCAmelCase = 0
__lowerCAmelCase = None
# exit variable. for break up the loops
__lowerCAmelCase = True
while i < len_pn and loop:
__lowerCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCAmelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and (len(_UpperCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and isinstance(_UpperCamelCase , _UpperCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 0
while numbera != 0:
__lowerCAmelCase = numbera % numbera
__lowerCAmelCase = numbera
__lowerCAmelCase = rest
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and isinstance(_UpperCamelCase , _UpperCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCAmelCase = prime_factorization(_UpperCamelCase )
__lowerCAmelCase = prime_factorization(_UpperCamelCase )
elif numbera == 1 or numbera == 1:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = max(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCAmelCase = prime_fac_a.count(_UpperCamelCase )
__lowerCAmelCase = prime_fac_a.count(_UpperCamelCase )
for _ in range(max(_UpperCamelCase , _UpperCamelCase ) ):
ans *= n
else:
__lowerCAmelCase = prime_fac_a.count(_UpperCamelCase )
for _ in range(_UpperCamelCase ):
ans *= n
done.append(_UpperCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCAmelCase = prime_fac_a.count(_UpperCamelCase )
for _ in range(_UpperCamelCase ):
ans *= n
done.append(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'number' must been a positive int"
__lowerCAmelCase = 0
__lowerCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_UpperCamelCase ):
ans += 1
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and is_prime(
_UpperCamelCase ), "'ans' must been a prime number and from type int"
return ans
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert (
is_prime(_UpperCamelCase ) and is_prime(_UpperCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCAmelCase = p_number_a + 1 # jump to the next number
__lowerCAmelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_UpperCamelCase ):
number += 1
while number < p_number_a:
ans.append(_UpperCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(_UpperCamelCase ):
number += 1
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and ans[0] != p_number_a
and ans[len(_UpperCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 1), "'n' must been int and >= 1"
__lowerCAmelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_UpperCamelCase )
# precondition
assert ans[0] == 1 and ans[len(_UpperCamelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
number > 1
), "'number' must been an int and >= 1"
__lowerCAmelCase = get_divisors(_UpperCamelCase )
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and (divisors[0] == 1)
and (divisors[len(_UpperCamelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and isinstance(_UpperCamelCase , _UpperCamelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowerCAmelCase = gcd(abs(_UpperCamelCase ) , abs(_UpperCamelCase ) )
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
__lowerCAmelCase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 1 # this will be return
for _ in range(n - 1 ):
__lowerCAmelCase = ans
ans += fiba
__lowerCAmelCase = tmp
return ans
| 636 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A : Optional[Any] = None
A : Any = logging.get_logger(__name__)
A : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
A : List[Any] = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
A : Union[str, Any] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
A : Optional[int] = "▁"
# Segments (not really needed)
A : str = 0
A : Dict = 1
A : List[str] = 2
A : Tuple = 3
A : Tuple = 4
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[Any] ="""left"""
__UpperCAmelCase : List[str] =XLNetTokenizer
def __init__( self , __a=None , __a=None , __a=False , __a=True , __a=False , __a="<s>" , __a="</s>" , __a="<unk>" , __a="<sep>" , __a="<pad>" , __a="<cls>" , __a="<mask>" , __a=["<eop>", "<eod>"] , **__a , ):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
vocab_file=__a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , additional_special_tokens=__a , **__a , )
__lowerCAmelCase = 3
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = remove_space
__lowerCAmelCase = keep_accents
__lowerCAmelCase = vocab_file
__lowerCAmelCase = False if not self.vocab_file else True
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self , __a , __a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
| 636 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__a = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 257 | '''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=a_ )
_UpperCAmelCase : Union[str, Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=a_ )
env_command_parser(subparsers=a_ )
launch_command_parser(subparsers=a_ )
tpu_command_parser(subparsers=a_ )
test_command_parser(subparsers=a_ )
# Let's go
_UpperCAmelCase : List[Any] = parser.parse_args()
if not hasattr(a_, "func" ):
parser.print_help()
exit(1 )
# Run
args.func(a_ )
if __name__ == "__main__":
main() | 257 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(UpperCamelCase__ , UpperCamelCase__ )
if activation_fn == "gelu-approximate":
A = GELU(UpperCamelCase__ , UpperCamelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(UpperCamelCase__ , UpperCamelCase__ )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(UpperCamelCase__ , UpperCamelCase__ )
A = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase__ ) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int ):
for module in self.net:
A = module(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none" ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A = approximate
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int ):
A = self.proj(UpperCamelCase__ )
A = self.gelu(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , dim_out * 2 )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : str , UpperCamelCase__ : str ):
A , A = self.proj(UpperCamelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase__ )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = self.proj(UpperCamelCase__ )
return x * torch.sigmoid(1.702 * x )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
super().__init__()
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , embedding_dim * 2 )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ ) ) )
A , A = torch.chunk(UpperCamelCase__ , 2 )
A = self.norm(UpperCamelCase__ ) * (1 + scale) + shift
return x
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , 6 * embedding_dim , bias=UpperCamelCase__ )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=UpperCamelCase__ ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(UpperCamelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , out_dim * 2 )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str ):
if self.act:
A = self.act(UpperCamelCase__ )
A = self.linear(UpperCamelCase__ )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(UpperCamelCase__ , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
| 699 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 | 1 |
"""simple docstring"""
class _SCREAMING_SNAKE_CASE : # Public class to implement a graph
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None:
lowercase__ : Any = row
lowercase__ : Union[str, Any] = col
lowercase__ : List[Any] = graph
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None:
# Checking all 8 elements surrounding nth element
lowercase__ : Tuple = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowercase__ : str = [-1, 0, 1, -1, 1, -1, 0, 1]
lowercase__ : Optional[int] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , lowerCamelCase__ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> int: # And finally, count all islands.
lowercase__ : Union[str, Any] = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase__ : Optional[int] = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
count += 1
return count | 714 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__snake_case = sys.version_info >= (3, 10)
def _lowerCamelCase ( lowerCamelCase__ : Tuple=None , lowerCamelCase__ : Any=None ):
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : int
_a : float
_a : str
_a : bool
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : int = 42
_a : str = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : bool = False
_a : bool = True
_a : Optional[bool] = None
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
_a : Optional[Any] = '''titi'''
_a : str = '''toto'''
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
_a : Union[str, Any] = '''titi'''
_a : Union[str, Any] = '''toto'''
_a : Tuple = 42
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : BasicEnum = "toto"
def UpperCAmelCase__( self ) -> Optional[Any]:
lowercase__ : List[str] = BasicEnum(self.foo )
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : MixedTypeEnum = "toto"
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : Any = MixedTypeEnum(self.foo )
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : Optional[int] = None
_a : Optional[float] = field(default=__UpperCAmelCase , metadata={'''help''': '''help message'''} )
_a : Optional[str] = None
_a : Optional[List[str]] = list_field(default=[] )
_a : Optional[List[int]] = list_field(default=[] )
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : List[int] = list_field(default=[] )
_a : List[int] = list_field(default=[1, 2, 3] )
_a : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
_a : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : List[int] = field()
_a : str = field()
_a : BasicEnum = field()
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : Optional[Any] = BasicEnum(self.required_enum )
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : int
_a : "BasicEnum" = field()
_a : "Optional[bool]" = None
_a : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} )
_a : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : bool = False
_a : bool = True
_a : bool | None = None
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : int | None = None
_a : float | None = field(default=__UpperCAmelCase , metadata={'''help''': '''help message'''} )
_a : str | None = None
_a : list[str] | None = list_field(default=[] )
_a : list[int] | None = list_field(default=[] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowercase__ : int = {k: v for k, v in vars(lowerCamelCase__ ).items() if k != """container"""}
lowercase__ : Any = {k: v for k, v in vars(lowerCamelCase__ ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , lowerCamelCase__ ) and yy.get("""choices""" , lowerCamelCase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](lowerCamelCase__ ) , yy["""type"""](lowerCamelCase__ ) )
del xx["type"], yy["type"]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : Any = HfArgumentParser(lowerCamelCase__ )
lowercase__ : int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument("""--bar""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument("""--baz""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument("""--flag""" , type=lowerCamelCase__ , default=lowerCamelCase__ , const=lowerCamelCase__ , nargs="""?""" )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Tuple = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((lowercase__) , ) : Optional[int] = parser.parse_args_into_dataclasses(lowerCamelCase__ , look_for_args_file=lowerCamelCase__ )
self.assertFalse(example.flag )
def UpperCAmelCase__( self ) -> List[Any]:
lowercase__ : List[str] = HfArgumentParser(lowerCamelCase__ )
lowercase__ : int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=lowerCamelCase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowerCamelCase__ , help="""help message""" )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[Any]:
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase__ , default=lowerCamelCase__ , const=lowerCamelCase__ , nargs="""?""" )
expected.add_argument("""--baz""" , type=lowerCamelCase__ , default=lowerCamelCase__ , const=lowerCamelCase__ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowerCamelCase__ , dest="""baz""" )
expected.add_argument("""--opt""" , type=lowerCamelCase__ , default=lowerCamelCase__ )
lowercase__ : Any = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase__ )
for dataclass_type in dataclass_types:
lowercase__ : Optional[int] = HfArgumentParser(lowerCamelCase__ )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[Any] = parser.parse_args([] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
lowercase__ : Optional[Any] = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
lowercase__ : str = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
lowercase__ : List[str] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
lowercase__ : Dict = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , baz=lowerCamelCase__ , opt=lowerCamelCase__ ) )
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : Dict = HfArgumentParser(lowerCamelCase__ )
lowercase__ : Tuple = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[int] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowercase__ : Optional[int] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowercase__ : Union[str, Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowercase__ : List[str] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowercase__ : Tuple = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
lowercase__ : List[str] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCAmelCase__( self ) -> List[str]:
@dataclass
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : Literal["titi", "toto", 42] = "toto"
lowercase__ : Tuple = HfArgumentParser(lowerCamelCase__ )
lowercase__ : Tuple = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowercase__ : Any = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowercase__ : int = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def UpperCAmelCase__( self ) -> int:
lowercase__ : Optional[Any] = HfArgumentParser(lowerCamelCase__ )
lowercase__ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowerCamelCase__ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowerCamelCase__ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowerCamelCase__ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowerCamelCase__ )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : str = parser.parse_args([] )
self.assertEqual(
lowerCamelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase__ : List[str] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(lowerCamelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def UpperCAmelCase__( self ) -> Dict:
lowercase__ : Any = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=lowerCamelCase__ , type=lowerCamelCase__ )
expected.add_argument("""--bar""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""help message""" )
expected.add_argument("""--baz""" , default=lowerCamelCase__ , type=lowerCamelCase__ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowerCamelCase__ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowerCamelCase__ )
lowercase__ : Tuple = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase__ )
for dataclass_type in dataclass_types:
lowercase__ : Dict = HfArgumentParser(lowerCamelCase__ )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(lowerCamelCase__ , Namespace(foo=lowerCamelCase__ , bar=lowerCamelCase__ , baz=lowerCamelCase__ , ces=[] , des=[] ) )
lowercase__ : Optional[Any] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(lowerCamelCase__ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = HfArgumentParser(lowerCamelCase__ )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument("""--required_str""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowerCamelCase__ , )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[Any]:
lowercase__ : List[Any] = HfArgumentParser(lowerCamelCase__ )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase__ , required=lowerCamelCase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowerCamelCase__ , )
expected.add_argument("""--opt""" , type=lowerCamelCase__ , default=lowerCamelCase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowerCamelCase__ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowerCamelCase__ )
self.argparsersEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> str:
lowercase__ : Any = HfArgumentParser(lowerCamelCase__ )
lowercase__ : Tuple = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
lowercase__ : int = parser.parse_dict(lowerCamelCase__ )[0]
lowercase__ : List[str] = BasicExample(**lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : Union[str, Any] = HfArgumentParser(lowerCamelCase__ )
lowercase__ : Dict = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(lowerCamelCase__ , parser.parse_dict , lowerCamelCase__ , allow_extra_keys=lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : List[str] = HfArgumentParser(lowerCamelCase__ )
lowercase__ : List[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : int = os.path.join(lowerCamelCase__ , """temp_json""" )
os.mkdir(lowerCamelCase__ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Dict = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
lowercase__ : Any = BasicExample(**lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = HfArgumentParser(lowerCamelCase__ )
lowercase__ : Union[str, Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Any = os.path.join(lowerCamelCase__ , """temp_yaml""" )
os.mkdir(lowerCamelCase__ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
lowercase__ : Dict = BasicExample(**lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : Optional[int] = HfArgumentParser(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ ) | 128 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __magic_name__ ( A__ ):
'''simple docstring'''
@slow
@require_torch
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowerCamelCase = bertabert.config.encoder.vocab_size
lowerCamelCase = tokenizer.sep_token_id
lowerCamelCase = tokenizer.cls_token_id
lowerCamelCase = 128
lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
lowerCamelCase = train_dataset.select(range(32 ) )
lowerCamelCase = val_dataset.select(range(16 ) )
lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(_a ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__lowercase , max_length=512 )
lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__lowercase , max_length=128 )
lowerCamelCase = inputs.input_ids
lowerCamelCase = inputs.attention_mask
lowerCamelCase = outputs.input_ids
lowerCamelCase = outputs.input_ids.copy()
lowerCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
lowerCamelCase = outputs.attention_mask
assert all(len(__lowercase ) == 512 for x in inputs.input_ids )
assert all(len(__lowercase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_a ):
lowerCamelCase = pred.label_ids
lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
lowerCamelCase = tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase )
lowerCamelCase = tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase )
lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowercase ) )] ) / len(__lowercase )
return {"accuracy": accuracy}
# map train dataset
lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowercase , batch_size=__lowercase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowercase , batch_size=__lowercase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
lowerCamelCase = self.get_auto_remove_tmp_dir()
lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=__lowercase , per_device_train_batch_size=__lowercase , per_device_eval_batch_size=__lowercase , predict_with_generate=__lowercase , evaluation_strategy="""steps""" , do_train=__lowercase , do_eval=__lowercase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowerCamelCase = SeqaSeqTrainer(
model=__lowercase , args=__lowercase , compute_metrics=_compute_metrics , train_dataset=__lowercase , eval_dataset=__lowercase , tokenizer=__lowercase , )
# start training
trainer.train()
| 543 |
from importlib import import_module
from .logging import get_logger
_lowercase : Optional[int] =get_logger(__name__)
class snake_case__ :
"""simple docstring"""
def __init__( self , __lowercase , __lowercase=None ) -> Dict:
"""simple docstring"""
a__ : Union[str, Any] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , __lowercase , getattr(__lowercase , __lowercase ) )
a__ : Optional[int] = module._original_module if isinstance(__lowercase , _PatchedModuleObj ) else module
class snake_case__ :
"""simple docstring"""
__lowerCAmelCase :Union[str, Any] = []
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase=None ) -> Dict:
"""simple docstring"""
a__ : List[Any] = obj
a__ : Dict = target
a__ : List[Any] = new
a__ : Tuple = target.split(""".""" )[0]
a__ : Any = {}
a__ : str = attrs or []
def __enter__( self ) -> Union[str, Any]:
"""simple docstring"""
*a__ , a__ : Dict = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__lowercase ) ):
try:
a__ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ : List[Any] = getattr(self.obj , __lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
a__ : str = obj_attr
# patch at top level
setattr(self.obj , __lowercase , _PatchedModuleObj(__lowercase , attrs=self.attrs ) )
a__ : str = getattr(self.obj , __lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(__lowercase , __lowercase , _PatchedModuleObj(getattr(__lowercase , __lowercase , __lowercase ) , attrs=self.attrs ) )
a__ : Optional[int] = getattr(__lowercase , __lowercase )
# finally set the target attribute
setattr(__lowercase , __lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ : str = getattr(import_module(""".""".join(__lowercase ) ) , __lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , __lowercase ) is attr_value:
a__ : Tuple = getattr(self.obj , __lowercase )
setattr(self.obj , __lowercase , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
a__ : Union[str, Any] = globals()["""__builtins__"""][target_attr]
setattr(self.obj , __lowercase , self.new )
else:
raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self , *__lowercase ) -> List[str]:
"""simple docstring"""
for attr in list(self.original ):
setattr(self.obj , __lowercase , self.original.pop(__lowercase ) )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 136 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( _UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE__ = 'Pix2StructImageProcessor'
SCREAMING_SNAKE_CASE__ = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = False
super().__init__(_UpperCAmelCase ,_UpperCAmelCase )
def __call__( self ,a_=None ,a_ = None ,a_ = True ,a_ = False ,a_ = None ,a_ = None ,a_ = 2048 ,a_ = 0 ,a_ = None ,a_ = None ,a_ = False ,a_ = False ,a_ = False ,a_ = False ,a_ = False ,a_ = True ,a_ = None ,**a_ ,):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowerCAmelCase__ = self.tokenizer
lowerCAmelCase__ = self.tokenizer(
text=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,max_length=_UpperCAmelCase ,stride=_UpperCAmelCase ,pad_to_multiple_of=_UpperCAmelCase ,return_attention_mask=_UpperCAmelCase ,return_overflowing_tokens=_UpperCAmelCase ,return_special_tokens_mask=_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase ,return_length=_UpperCAmelCase ,verbose=_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,**_UpperCAmelCase ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowerCAmelCase__ = self.image_processor(
_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,max_patches=_UpperCAmelCase ,**_UpperCAmelCase )
else:
# add pixel_values and bbox
lowerCAmelCase__ = self.image_processor(
_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,max_patches=_UpperCAmelCase ,header_text=_UpperCAmelCase ,**_UpperCAmelCase )
if text is not None and not self.image_processor.is_vqa:
lowerCAmelCase__ = self.tokenizer(
text=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,max_length=_UpperCAmelCase ,stride=_UpperCAmelCase ,pad_to_multiple_of=_UpperCAmelCase ,return_attention_mask=_UpperCAmelCase ,return_overflowing_tokens=_UpperCAmelCase ,return_special_tokens_mask=_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase ,return_length=_UpperCAmelCase ,verbose=_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,**_UpperCAmelCase ,)
if "attention_mask" in text_encoding:
lowerCAmelCase__ = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
lowerCAmelCase__ = text_encoding.pop('input_ids' )
else:
lowerCAmelCase__ = None
if text_encoding is not None:
encoding_image_processor.update(_UpperCAmelCase )
return encoding_image_processor
def SCREAMING_SNAKE_CASE_ ( self ,*a_ ,**a_ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase ,**_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( self ,*a_ ,**a_ ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase ,**_UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.model_input_names
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 713 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_lowerCAmelCase : Any = False
class __snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase__ = 'A painting of a squirrel eating a burger '
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=a_ ,generator=a_ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
lowerCAmelCase__ = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=a_ ,generator=a_ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase__ = 'A painting of a squirrel eating a burger '
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=a_ ,generator=a_ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images
lowerCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 604 | 0 |
'''simple docstring'''
def __UpperCamelCase( _A : list[list[float]] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
for data in source_data:
for i, el in enumerate(__snake_case ):
if len(__snake_case ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(__snake_case ) )
return data_lists
def __UpperCamelCase( _A : list[list[float]] , _A : list[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for dlist, weight in zip(__snake_case , __snake_case ):
UpperCAmelCase__ : Tuple = min(__snake_case )
UpperCAmelCase__ : Optional[Any] = max(__snake_case )
UpperCAmelCase__ : Any = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
UpperCAmelCase__ : List[Any] = F'''Invalid weight of {weight:f} provided'''
raise ValueError(__snake_case )
score_lists.append(__snake_case )
return score_lists
def __UpperCamelCase( _A : list[list[float]] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(__snake_case ):
UpperCAmelCase__ : Any = final_scores[j] + ele
return final_scores
def __UpperCamelCase( _A : list[list[float]] , _A : list[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = get_data(__snake_case )
UpperCAmelCase__ : int = calculate_each_score(__snake_case , __snake_case )
UpperCAmelCase__ : int = generate_final_scores(__snake_case )
# append scores to source data
for i, ele in enumerate(__snake_case ):
source_data[i].append(__snake_case )
return source_data
| 614 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_lengths
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =gelu_activation
lowerCamelCase_ =sinusoidal_embeddings
lowerCamelCase_ =causal
lowerCamelCase_ =asm
lowerCamelCase_ =n_langs
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_special
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =summary_type
lowerCamelCase_ =use_proj
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Tuple =(
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCamelCase_ =True
lowerCamelCase_ =model_class(config=lowerCAmelCase )
lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =torch.jit.trace(
lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase )[0]
lowerCamelCase_ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Any = """swinv2"""
lowerCAmelCase : Union[str, Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , UpperCAmelCase__=224 , UpperCAmelCase__=4 , UpperCAmelCase__=3 , UpperCAmelCase__=96 , UpperCAmelCase__=[2, 2, 6, 2] , UpperCAmelCase__=[3, 6, 12, 24] , UpperCAmelCase__=7 , UpperCAmelCase__=4.0 , UpperCAmelCase__=True , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.1 , UpperCAmelCase__="gelu" , UpperCAmelCase__=False , UpperCAmelCase__=0.02 , UpperCAmelCase__=1e-5 , UpperCAmelCase__=32 , **UpperCAmelCase__ , ):
super().__init__(**UpperCAmelCase__ )
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = embed_dim
A__ = depths
A__ = len(UpperCAmelCase__ )
A__ = num_heads
A__ = window_size
A__ = mlp_ratio
A__ = qkv_bias
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = drop_path_rate
A__ = hidden_act
A__ = use_absolute_embeddings
A__ = layer_norm_eps
A__ = initializer_range
A__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A__ = int(embed_dim * 2 ** (len(UpperCAmelCase__ ) - 1) )
A__ = (0, 0, 0, 0)
| 712 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@slow
def __A ( self ):
A__ = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
A__ = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(UpperCAmelCase__ )
from datasets import load_dataset
A__ = load_dataset("nielsr/rvlcdip-demo" )
A__ = dataset["train"][0]["image"].convert("RGB" )
A__ = image_processor(UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
A__ = model(**UpperCAmelCase__ )
A__ = outputs.logits
A__ = torch.Size((1, 16) )
self.assertEqual(logits.shape , UpperCAmelCase__ )
A__ = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=UpperCAmelCase__ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 232 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(_lowerCAmelCase )
class __a ( _lowerCAmelCase ):
def __init__( self : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str )-> Optional[int]:
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None )-> Tuple:
"""simple docstring"""
UpperCamelCase = {}
if top_k is not None:
UpperCamelCase = top_k
return {}, {}, postprocess_params
def __call__( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCAmelCase_ : Tuple )-> Dict:
"""simple docstring"""
return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase_ : Dict )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = load_image(UpperCAmelCase_ )
UpperCamelCase = self.image_processor(images=UpperCAmelCase_ , return_tensors=self.framework )
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] )-> int:
"""simple docstring"""
UpperCamelCase = self.model(**UpperCAmelCase_ )
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=5 )-> Tuple:
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase = model_outputs.logits.softmax(-1 )[0]
UpperCamelCase , UpperCamelCase = probs.topk(UpperCAmelCase_ )
elif self.framework == "tf":
UpperCamelCase = stable_softmax(model_outputs.logits , axis=-1 )[0]
UpperCamelCase = tf.math.top_k(UpperCAmelCase_ , k=UpperCAmelCase_ )
UpperCamelCase , UpperCamelCase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCamelCase = scores.tolist()
UpperCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase_ , UpperCAmelCase_ )]
| 554 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCAmelCase_ )-> Tuple:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[str]:
"""simple docstring"""
return max(metric_fn(UpperCAmelCase_ , UpperCAmelCase_ ) for gt in ground_truths )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = []
if args.gold_data_mode == "qa":
UpperCamelCase = pd.read_csv(UpperCAmelCase_ , sep="\t" , header=UpperCAmelCase_ )
for answer_list in data[1]:
UpperCamelCase = ast.literal_eval(UpperCAmelCase_ )
answers.append(UpperCAmelCase_ )
else:
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = [[reference] for reference in references]
UpperCamelCase = UpperCamelCase = UpperCamelCase = 0
for prediction, ground_truths in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
total += 1
em += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
fa += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = 100.0 * em / total
UpperCamelCase = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
UpperCamelCase = args.k
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = UpperCamelCase = 0
for hypo, reference in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = set(hypo.split("\t" )[:k] )
UpperCamelCase = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[int]:
"""simple docstring"""
def strip_title(UpperCAmelCase_ ):
if title.startswith("\"" ):
UpperCamelCase = title[1:]
if title.endswith("\"" ):
UpperCamelCase = title[:-1]
return title
UpperCamelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ , return_tensors="pt" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , )["input_ids"].to(args.device )
UpperCamelCase = rag_model.rag.question_encoder(UpperCAmelCase_ )
UpperCamelCase = question_enc_outputs[0]
UpperCamelCase = rag_model.retriever(
UpperCAmelCase_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
UpperCamelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase = []
for docs in all_docs:
UpperCamelCase = [strip_title(UpperCAmelCase_ ) for title in docs["title"]]
provenance_strings.append("\t".join(UpperCAmelCase_ ) )
return provenance_strings
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[int]:
"""simple docstring"""
with torch.no_grad():
UpperCamelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ , return_tensors="pt" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
UpperCamelCase = inputs_dict.input_ids.to(args.device )
UpperCamelCase = inputs_dict.attention_mask.to(args.device )
UpperCamelCase = rag_model.generate( # rag_model overwrites generate
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCAmelCase_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
if args.print_predictions:
for q, a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
logger.info("Q: {} - A: {}".format(UpperCAmelCase_ , UpperCAmelCase_ ) )
return answers
def lowerCamelCase__ ( )-> Any:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=UpperCAmelCase_ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=UpperCAmelCase_ , choices=["exact", "compressed", "legacy"] , type=UpperCAmelCase_ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=UpperCAmelCase_ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=UpperCAmelCase_ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=UpperCAmelCase_ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=UpperCAmelCase_ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=UpperCAmelCase_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=UpperCAmelCase_ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=UpperCAmelCase_ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=UpperCAmelCase_ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=UpperCAmelCase_ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
UpperCamelCase = parser.parse_args()
UpperCamelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def lowerCamelCase__ ( UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
UpperCamelCase = {}
if args.model_type is None:
UpperCamelCase = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
UpperCamelCase = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
UpperCamelCase = args.n_docs
if args.index_name is not None:
UpperCamelCase = args.index_name
if args.index_path is not None:
UpperCamelCase = args.index_path
else:
UpperCamelCase = BartForConditionalGeneration
UpperCamelCase = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , UpperCAmelCase_ )
UpperCamelCase = get_scores if args.eval_mode == "e2e" else get_precision_at_k
UpperCamelCase = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(UpperCAmelCase_ , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(UpperCAmelCase_ ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
UpperCamelCase = RagRetriever.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
UpperCamelCase = model_class.from_pretrained(UpperCAmelCase_ , retriever=UpperCAmelCase_ , **UpperCAmelCase_ )
model.retriever.init_retrieval()
else:
UpperCamelCase = model_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
UpperCamelCase = []
for line in tqdm(UpperCAmelCase_ ):
questions.append(line.strip() )
if len(UpperCAmelCase_ ) == args.eval_batch_size:
UpperCamelCase = evaluate_batch_fn(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
preds_file.write("\n".join(UpperCAmelCase_ ) + "\n" )
preds_file.flush()
UpperCamelCase = []
if len(UpperCAmelCase_ ) > 0:
UpperCamelCase = evaluate_batch_fn(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
preds_file.write("\n".join(UpperCAmelCase_ ) )
preds_file.flush()
score_fn(UpperCAmelCase_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = get_args()
main(args)
| 554 | 1 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__A : str = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def UpperCAmelCase ( lowerCamelCase_ :int ):
'''simple docstring'''
snake_case_ : Tuple = {}
state_dict.pop("""pixel_mean""" , lowerCamelCase_ )
state_dict.pop("""pixel_std""" , lowerCamelCase_ )
snake_case_ : Optional[int] = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case_ : str = key.replace(lowerCamelCase_ , lowerCamelCase_ )
if re.match(lowerCamelCase_ , lowerCamelCase_ ):
snake_case_ : Tuple = int(re.match(lowerCamelCase_ , lowerCamelCase_ ).group(2 ) )
if layer_nb == 0:
snake_case_ : str = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
snake_case_ : Dict = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
snake_case_ : List[str] = key.replace("""layers.2""" , """proj_out""" )
snake_case_ : Union[str, Any] = value
snake_case_ : Tuple = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :str="ybelkada/segment-anything" ):
'''simple docstring'''
snake_case_ : int = hf_hub_download(lowerCamelCase_ , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
snake_case_ : Any = SamConfig()
elif "sam_vit_l" in model_name:
snake_case_ : int = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
snake_case_ : int = SamConfig(
vision_config=lowerCamelCase_ , )
elif "sam_vit_h" in model_name:
snake_case_ : Optional[Any] = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
snake_case_ : Tuple = SamConfig(
vision_config=lowerCamelCase_ , )
snake_case_ : Optional[Any] = torch.load(lowerCamelCase_ , map_location="""cpu""" )
snake_case_ : Dict = replace_keys(lowerCamelCase_ )
snake_case_ : List[Any] = SamImageProcessor()
snake_case_ : List[str] = SamProcessor(image_processor=lowerCamelCase_ )
snake_case_ : str = SamModel(lowerCamelCase_ )
hf_model.load_state_dict(lowerCamelCase_ )
snake_case_ : List[str] = hf_model.to("""cuda""" )
snake_case_ : Any = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
snake_case_ : Union[str, Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
snake_case_ : str = [[[4_00, 6_50]]]
snake_case_ : List[Any] = [[1]]
snake_case_ : Any = processor(images=np.array(lowerCamelCase_ ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
snake_case_ : str = hf_model(**lowerCamelCase_ )
snake_case_ : Optional[int] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
snake_case_ : Optional[Any] = processor(
images=np.array(lowerCamelCase_ ) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
snake_case_ : List[Any] = hf_model(**lowerCamelCase_ )
snake_case_ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
snake_case_ : str = ((75, 2_75, 17_25, 8_50),)
snake_case_ : str = processor(images=np.array(lowerCamelCase_ ) , input_boxes=lowerCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
snake_case_ : Optional[Any] = hf_model(**lowerCamelCase_ )
snake_case_ : Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
snake_case_ : Dict = [[[4_00, 6_50], [8_00, 6_50]]]
snake_case_ : Union[str, Any] = [[1, 1]]
snake_case_ : List[Any] = processor(
images=np.array(lowerCamelCase_ ) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
snake_case_ : str = hf_model(**lowerCamelCase_ )
snake_case_ : Dict = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
__A : Any = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
__A : Optional[int] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id) | 267 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[Any]=0 ):
'''simple docstring'''
# Format the message.
if name is None:
snake_case_ : Tuple = None
else:
snake_case_ : Optional[Any] = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
snake_case_ : Optional[Any] = fmt.format(lowerCamelCase_ )
# Print and recurse (if needed).
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
if msg is not None:
print(lowerCamelCase_ )
for k in val.keys():
recursive_print(lowerCamelCase_ , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase_ , torch.Tensor ):
print(lowerCamelCase_ , """:""" , val.size() )
else:
print(lowerCamelCase_ , """:""" , lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :int ):
'''simple docstring'''
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : List[Any] = param.view(*lowerCamelCase_ )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : List[str] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Tuple = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : int = param.view(*lowerCamelCase_ )
snake_case_ : str = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*lowerCamelCase_ )
return param
def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple ):
'''simple docstring'''
# The converted output model.
snake_case_ : Tuple = {}
# old versions did not store training args
snake_case_ : Optional[Any] = input_state_dict.get("""args""" , lowerCamelCase_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Optional[int] = ds_args.padded_vocab_size
snake_case_ : str = ds_args.max_position_embeddings
snake_case_ : Tuple = ds_args.hidden_size
snake_case_ : List[str] = ds_args.num_layers
snake_case_ : Union[str, Any] = ds_args.num_attention_heads
snake_case_ : Tuple = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : int = config.n_head
# The hidden_size per head.
snake_case_ : Any = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Tuple = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : Dict = 0.0
# The model.
snake_case_ : Optional[Any] = input_state_dict["""model"""]
# The language model.
snake_case_ : Optional[Any] = model["""language_model"""]
# The embeddings.
snake_case_ : int = lm["""embedding"""]
# The word embeddings.
snake_case_ : Any = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Any = word_embeddings[: config.vocab_size, :]
snake_case_ : Union[str, Any] = word_embeddings
# The position embeddings.
snake_case_ : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : List[str] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
snake_case_ : List[str] = pos_embeddings
# The transformer.
snake_case_ : str = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Union[str, Any] = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : Tuple = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : List[Any] = layer_re.match(lowerCamelCase_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Any = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : List[str] = m.group(3 )
# The name of the layer.
snake_case_ : List[Any] = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : str = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : List[str] = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : Optional[Any] = masked_bias
snake_case_ : Optional[Any] = fix_query_key_value_ordering(lowerCamelCase_ , lowerCamelCase_ , 3 , lowerCamelCase_ , lowerCamelCase_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : Any = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : int = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Dict = fix_query_key_value_ordering(lowerCamelCase_ , lowerCamelCase_ , 3 , lowerCamelCase_ , lowerCamelCase_ )
# Store. No change of shape.
snake_case_ : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : str = megatron_to_transformers[op_name]
snake_case_ : Union[str, Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : str = megatron_to_transformers[op_name]
snake_case_ : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : str = transformer["""final_layernorm.weight"""]
snake_case_ : int = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def UpperCAmelCase ( ):
'''simple docstring'''
# Create the argument parser.
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=lowerCamelCase_ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=lowerCamelCase_ , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : Tuple = parser.parse_args()
# Extract the basename.
snake_case_ : List[str] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Dict = torch.load(lowerCamelCase_ , map_location="""cpu""" )
else:
snake_case_ : Optional[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Optional[int] = input_state_dict.get("""args""" , lowerCamelCase_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Tuple = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : int = """gelu_new"""
else:
snake_case_ : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : int = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : int = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=lowerCamelCase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=lowerCamelCase_ , summary_activation=lowerCamelCase_ , summary_proj_to_labels=lowerCamelCase_ , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
snake_case_ : int = GPTaConfig.from_json_file(args.config_file )
snake_case_ : Optional[int] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Optional[Any] = convert_megatron_checkpoint(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase_ , lowerCamelCase_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : List[str] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
snake_case_ : int = type(lowerCamelCase_ ).__name__
snake_case_ : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(lowerCamelCase_ )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase_ )
# Store the state_dict to file.
snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """pytorch_model.bin""" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
####################################################################################################
if __name__ == "__main__":
main()
#################################################################################################### | 267 | 1 |
"""simple docstring"""
import os
from distutils.util import strtobool
def UpperCAmelCase ( _lowercase : Union[str, Any] , _lowercase : Optional[Any] ) -> str:
"""simple docstring"""
for e in env_keys:
lowerCAmelCase_ = int(os.environ.get(_lowercase , -1 ) )
if val >= 0:
return val
return default
def UpperCAmelCase ( _lowercase : List[Any] , _lowercase : Dict=False ) -> Any:
"""simple docstring"""
lowerCAmelCase_ = os.environ.get(_lowercase , str(_lowercase ) )
return strtobool(_lowercase ) == 1 # As its name indicates `strtobool` actually returns an int...
def UpperCAmelCase ( _lowercase : Dict , _lowercase : Union[str, Any]="no" ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ = os.environ.get(_lowercase , str(_lowercase ) )
return value | 552 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'vocab_file': 'vocab.txt'}
lowercase_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
lowercase_ = {
'YituTech/conv-bert-base': 5_12,
'YituTech/conv-bert-medium-small': 5_12,
'YituTech/conv-bert-small': 5_12,
}
lowercase_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __a ( __snake_case ):
lowerCamelCase : Optional[int] =VOCAB_FILES_NAMES
lowerCamelCase : List[str] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str =PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[str] =ConvBertTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase="[UNK]" , UpperCAmelCase="[SEP]" , UpperCAmelCase="[PAD]" , UpperCAmelCase="[CLS]" , UpperCAmelCase="[MASK]" , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCAmelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCAmelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCAmelCase_ = getattr(UpperCAmelCase , normalizer_state.pop('''type''' ) )
lowerCAmelCase_ = do_lower_case
lowerCAmelCase_ = strip_accents
lowerCAmelCase_ = tokenize_chinese_chars
lowerCAmelCase_ = normalizer_class(**UpperCAmelCase )
lowerCAmelCase_ = do_lower_case
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase=None ):
'''simple docstring'''
lowerCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase_ = [self.sep_token_id]
lowerCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase ) | 552 | 1 |
import warnings
from functools import wraps
from typing import Callable
def __UpperCamelCase ( A ):
@wraps(_lowerCamelCase )
def _inner_fn(*A , **A ):
warnings.warn(
(f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , _lowerCamelCase , )
return fn(*_lowerCamelCase , **_lowerCamelCase )
return _inner_fn
| 717 | from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _A ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : "DiagonalGaussianDistribution"
class _A ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : List[str] =True
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = ("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE_ = ("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE_ = (64,) , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = "silu" , SCREAMING_SNAKE_CASE_ = 4 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 0.18215 , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
UpperCamelCase__ = Encoder(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , down_block_types=SCREAMING_SNAKE_CASE_ , block_out_channels=SCREAMING_SNAKE_CASE_ , layers_per_block=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , double_z=SCREAMING_SNAKE_CASE_ , )
# pass init params to Decoder
UpperCamelCase__ = Decoder(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , up_block_types=SCREAMING_SNAKE_CASE_ , block_out_channels=SCREAMING_SNAKE_CASE_ , layers_per_block=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCamelCase__ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase__ = False
UpperCamelCase__ = False
# only relevant if vae tiling is enabled
UpperCamelCase__ = self.config.sample_size
UpperCamelCase__ = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCamelCase__ = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCamelCase__ = 0.25
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Dict:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , (Encoder, Decoder) ):
UpperCamelCase__ = value
def _a (self , SCREAMING_SNAKE_CASE_ = True ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = use_tiling
def _a (self ) -> int:
'''simple docstring'''
self.enable_tiling(SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = True
def _a (self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _a (self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
UpperCamelCase__ = {}
def fn_recursive_add_processors(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if hasattr(SCREAMING_SNAKE_CASE_ , '''set_processor''' ):
UpperCamelCase__ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return processors
def _a (self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = len(self.attn_processors.keys() )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(SCREAMING_SNAKE_CASE_ )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if hasattr(SCREAMING_SNAKE_CASE_ , '''set_processor''' ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
module.set_processor(SCREAMING_SNAKE_CASE_ )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for name, module in self.named_children():
fn_recursive_attn_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Tuple:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> AutoencoderKLOutput:
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
if self.use_slicing and x.shape[0] > 1:
UpperCamelCase__ = [self.encoder(SCREAMING_SNAKE_CASE_ ) for x_slice in x.split(1 )]
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = self.encoder(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.quant_conv(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DiagonalGaussianDistribution(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.post_quant_conv(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.decoder(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
@apply_forward_hook
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
UpperCamelCase__ = [self._decode(SCREAMING_SNAKE_CASE_ ).sample for z_slice in z.split(1 )]
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = self._decode(SCREAMING_SNAKE_CASE_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = min(a.shape[2] , b.shape[2] , SCREAMING_SNAKE_CASE_ )
for y in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = min(a.shape[3] , b.shape[3] , SCREAMING_SNAKE_CASE_ )
for x in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> AutoencoderKLOutput:
'''simple docstring'''
UpperCamelCase__ = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCamelCase__ = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCamelCase__ = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCamelCase__ = []
for i in range(0 , x.shape[2] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for j in range(0 , x.shape[3] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCamelCase__ = self.encoder(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.quant_conv(SCREAMING_SNAKE_CASE_ )
row.append(SCREAMING_SNAKE_CASE_ )
rows.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
for i, row in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for j, tile in enumerate(SCREAMING_SNAKE_CASE_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase__ = self.blend_v(rows[i - 1][j] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if j > 0:
UpperCamelCase__ = self.blend_h(row[j - 1] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(SCREAMING_SNAKE_CASE_ , dim=3 ) )
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=2 )
UpperCamelCase__ = DiagonalGaussianDistribution(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCamelCase__ = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCamelCase__ = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCamelCase__ = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCamelCase__ = []
for i in range(0 , z.shape[2] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for j in range(0 , z.shape[3] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCamelCase__ = self.post_quant_conv(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.decoder(SCREAMING_SNAKE_CASE_ )
row.append(SCREAMING_SNAKE_CASE_ )
rows.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
for i, row in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for j, tile in enumerate(SCREAMING_SNAKE_CASE_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase__ = self.blend_v(rows[i - 1][j] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if j > 0:
UpperCamelCase__ = self.blend_h(row[j - 1] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(SCREAMING_SNAKE_CASE_ , dim=3 ) )
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCamelCase__ = sample
UpperCamelCase__ = self.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
if sample_posterior:
UpperCamelCase__ = posterior.sample(generator=SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = posterior.mode()
UpperCamelCase__ = self.decode(SCREAMING_SNAKE_CASE_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
| 469 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _A ( a_ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ""
__lowerCamelCase : Dict = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(self ,**SCREAMING_SNAKE_CASE_ )
snake_case : int = repo_info
snake_case : Any = token
snake_case : int = None
def snake_case_ ( self ):
'''simple docstring'''
if self.dir_cache is None:
snake_case : List[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
snake_case : int = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(SCREAMING_SNAKE_CASE_ ): {"""name""": str(SCREAMING_SNAKE_CASE_ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "rb" ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
if not isinstance(self.repo_info ,SCREAMING_SNAKE_CASE_ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
snake_case : str = hf_hub_url(self.repo_info.id ,SCREAMING_SNAKE_CASE_ ,revision=self.repo_info.sha )
return fsspec.open(
SCREAMING_SNAKE_CASE_ ,mode=SCREAMING_SNAKE_CASE_ ,headers=get_authentication_headers_for_url(SCREAMING_SNAKE_CASE_ ,use_auth_token=self.token ) ,client_kwargs={"""trust_env""": True} ,).open()
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
self._get_dirs()
snake_case : List[Any] = self._strip_protocol(SCREAMING_SNAKE_CASE_ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
self._get_dirs()
snake_case : Optional[int] = PurePosixPath(path.strip("""/""" ) )
snake_case : str = {}
for p, f in self.dir_cache.items():
snake_case : Optional[Any] = PurePosixPath(p.strip("""/""" ) )
snake_case : int = p.parent
if root == path:
snake_case : int = f
snake_case : Optional[int] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 36 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : Optional[int] =field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool =field(
default=a_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCamelCase : bool =field(
default=a_ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
lowerCamelCase : Optional[int] =field(
default=a_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] =field(
default=a_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] =field(
default=a_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : str =field(
default=a_ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : str =field(
default=a_ , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "Train language if it is different from the evaluation language."} )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : Optional[bool] =field(
default=a_ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
lowerCamelCase : bool =field(
default=a_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str =field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool =field(
default=a_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCamelCase : bool =field(
default=a_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def snake_case_ () -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" , __A )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase : int = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__lowerCAmelCase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
__lowerCAmelCase : Union[str, Any] = load_dataset(
"""xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
__lowerCAmelCase : List[str] = load_dataset(
"""xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Dict = train_dataset.features["""label"""].names
if training_args.do_eval:
__lowerCAmelCase : Tuple = load_dataset(
"""xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Optional[int] = eval_dataset.features["""label"""].names
if training_args.do_predict:
__lowerCAmelCase : List[Any] = load_dataset(
"""xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Optional[Any] = predict_dataset.features["""label"""].names
# Labels
__lowerCAmelCase : Optional[int] = len(__A )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , idalabel={str(__A ): label for i, label in enumerate(__A )} , labelaid={label: i for i, label in enumerate(__A )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase : List[str] = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowerCAmelCase : int = False
def preprocess_function(__A : List[str] ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] , examples["""hypothesis"""] , padding=__A , max_length=data_args.max_seq_length , truncation=__A , )
if training_args.do_train:
if data_args.max_train_samples is not None:
__lowerCAmelCase : Union[str, Any] = min(len(__A ) , data_args.max_train_samples )
__lowerCAmelCase : List[str] = train_dataset.select(range(__A ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__lowerCAmelCase : Dict = train_dataset.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__A ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__lowerCAmelCase : str = min(len(__A ) , data_args.max_eval_samples )
__lowerCAmelCase : Optional[int] = eval_dataset.select(range(__A ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__lowerCAmelCase : List[Any] = eval_dataset.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
__lowerCAmelCase : Dict = min(len(__A ) , data_args.max_predict_samples )
__lowerCAmelCase : Optional[int] = predict_dataset.select(range(__A ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
__lowerCAmelCase : Union[str, Any] = predict_dataset.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
# Get the metric function
__lowerCAmelCase : Optional[Any] = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__A : EvalPrediction ):
__lowerCAmelCase : Optional[int] = p.predictions[0] if isinstance(p.predictions , __A ) else p.predictions
__lowerCAmelCase : Tuple = np.argmax(__A , axis=1 )
return metric.compute(predictions=__A , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowerCAmelCase : Dict = default_data_collator
elif training_args.fpaa:
__lowerCAmelCase : Union[str, Any] = DataCollatorWithPadding(__A , pad_to_multiple_of=8 )
else:
__lowerCAmelCase : Optional[int] = None
# Initialize our Trainer
__lowerCAmelCase : Optional[int] = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__A , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
__lowerCAmelCase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase : Tuple = last_checkpoint
__lowerCAmelCase : List[str] = trainer.train(resume_from_checkpoint=__A )
__lowerCAmelCase : int = train_result.metrics
__lowerCAmelCase : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
__lowerCAmelCase : Optional[int] = min(__A , len(__A ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __A )
trainer.save_metrics("""train""" , __A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase : List[str] = trainer.evaluate(eval_dataset=__A )
__lowerCAmelCase : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
__lowerCAmelCase : List[Any] = min(__A , len(__A ) )
trainer.log_metrics("""eval""" , __A )
trainer.save_metrics("""eval""" , __A )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Any = trainer.predict(__A , metric_key_prefix="""predict""" )
__lowerCAmelCase : List[Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__A )
)
__lowerCAmelCase : str = min(__A , len(__A ) )
trainer.log_metrics("""predict""" , __A )
trainer.save_metrics("""predict""" , __A )
__lowerCAmelCase : Any = np.argmax(__A , axis=1 )
__lowerCAmelCase : int = os.path.join(training_args.output_dir , """predictions.txt""" )
if trainer.is_world_process_zero():
with open(__A , """w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__A ):
__lowerCAmelCase : List[Any] = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 651 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Dict = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = '''funnel'''
_UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : Tuple , __lowerCAmelCase : Tuple=3_0522 , __lowerCAmelCase : Optional[Any]=[4, 4, 4] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : Dict=12 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[int]=3072 , __lowerCAmelCase : str="gelu_new" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1E-9 , __lowerCAmelCase : Any="mean" , __lowerCAmelCase : List[str]="relative_shift" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : str , ) -> int:
"""simple docstring"""
a = vocab_size
a = block_sizes
a = [1] * len(__lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(__lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
a = num_decoder_layers
a = d_model
a = n_head
a = d_head
a = d_inner
a = hidden_act
a = hidden_dropout
a = attention_dropout
a = activation_dropout
a = initializer_range
a = initializer_std
a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
a = attention_type
a = separate_cls
a = truncate_seq
a = pool_q_only
super().__init__(**__lowerCAmelCase )
@property
def A ( self : Tuple ) -> Any:
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def A ( self : str , __lowerCAmelCase : Dict ) -> Dict:
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def A ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def A ( self : List[str] , __lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 32 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : int = logging.getLogger(__name__)
@dataclass
class _lowercase :
_UpperCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCAmelCase = field(
default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class _lowercase :
_UpperCAmelCase = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
_UpperCAmelCase = field(
default=128, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase__ ( ):
'''simple docstring'''
a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
a = import_module("tasks" )
try:
a = getattr(UpperCAmelCase__ , model_args.task_type )
a = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
a = token_classification_task.get_labels(data_args.labels )
a = dict(enumerate(UpperCAmelCase__ ) )
a = len(UpperCAmelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
a = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
a = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
a = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]:
a = np.argmax(UpperCAmelCase__ , axis=2 )
a , a = preds.shape
a = [[] for _ in range(UpperCAmelCase__ )]
a = [[] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict:
a , a = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
}
# Data collator
a = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
a = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a = trainer.evaluate()
a = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
results.update(UpperCAmelCase__ )
# Predict
if training_args.do_predict:
a = TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
a , a , a = trainer.predict(UpperCAmelCase__ )
a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )
a = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
a = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return results
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 32 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 |
'''simple docstring'''
def lowercase__( _UpperCamelCase : str )-> str:
"""simple docstring"""
return " ".join(
"".join(word[::-1] ) if len(_UpperCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 138 | 0 |
"""simple docstring"""
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ , A__ = len(__a ), len(grid[0] )
if (
min(__a ,__a ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(__a ,row + 1 ,__a ,__a )
count += depth_first_search(__a ,row - 1 ,__a ,__a )
count += depth_first_search(__a ,__a ,col + 1 ,__a )
count += depth_first_search(__a ,__a ,col - 1 ,__a )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ = "cpu" ,lowerCAmelCase__ = None ):
A__ = torch.load(lowerCAmelCase__ ,map_location=lowerCAmelCase__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(lowerCAmelCase__ ,torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
A__ = v.half()
if save_path is None: # overwrite src_path
A__ = src_path
torch.save(lowerCAmelCase__ ,lowerCAmelCase__ )
if __name__ == "__main__":
fire.Fire(convert)
| 554 | 0 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> Optional[Any]:
_A = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Optional[int]:
_A , _A = emb.weight.shape
_A = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
_A = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Tuple="facebook/mbart-large-en-ro" , _snake_case :Tuple=False , _snake_case :Optional[Any]=False ) -> Optional[int]:
_A = torch.load(_snake_case , map_location='''cpu''' )['''model''']
remove_ignore_keys_(_snake_case )
_A = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_A = MBartConfig.from_pretrained(_snake_case , vocab_size=_snake_case )
if mbart_aa and finetuned:
_A = '''relu'''
_A = state_dict['''decoder.embed_tokens.weight''']
_A = MBartForConditionalGeneration(_snake_case )
model.model.load_state_dict(_snake_case )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 2 | """simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def a_ ( _lowerCAmelCase : int = 200_0000 ):
'''simple docstring'''
lowercase__ : list[int] = [0]
lowercase__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
lowercase__ : int = 0
# the area corresponding to the grid that gives the product closest to target
lowercase__ : int = 0
# an estimate of b, using the quadratic formula
lowercase__ : float
# the largest integer less than b_estimate
lowercase__ : int
# the largest integer less than b_estimate
lowercase__ : int
# the triangle number corresponding to b_floor
lowercase__ : int
# the triangle number corresponding to b_ceil
lowercase__ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
lowercase__ : int = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
lowercase__ : Union[str, Any] = floor(_lowerCAmelCase )
lowercase__ : Tuple = ceil(_lowerCAmelCase )
lowercase__ : Optional[Any] = triangle_numbers[b_floor]
lowercase__ : Optional[Any] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
lowercase__ : Union[str, Any] = triangle_b_first_guess * triangle_a
lowercase__ : Optional[Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
lowercase__ : str = triangle_b_second_guess * triangle_a
lowercase__ : Optional[Any] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f'''{solution() = }''')
| 599 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class __a :
def __init__( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ,lowerCamelCase : str ,lowerCamelCase : float ):
'''simple docstring'''
if nodea not in self.connections:
self.add_node(lowerCamelCase )
if nodea not in self.connections:
self.add_node(lowerCamelCase )
__SCREAMING_SNAKE_CASE = probability
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return list(self.connections )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict[str, int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = Counter(graph.get_nodes() )
__SCREAMING_SNAKE_CASE = start
for _ in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = graph.transition(__UpperCAmelCase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 ):
'''simple docstring'''
_lowerCAmelCase = right or len(SCREAMING_SNAKE_CASE_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
A__ : Tuple =1
for i in range(1, num + 1 ):
fact *= i
return fact
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
A__ : Optional[Any] =0
while number > 0:
A__ : List[Any] =number % 1_0
sum_of_digits += last_digit
A__ : str =number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def SCREAMING_SNAKE_CASE__ ( snake_case_ = 1_0_0 ) -> int:
A__ : List[str] =factorial(snake_case_ )
A__ : str =split_and_add(snake_case_ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 416 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( a : int ) -> bool:
"""simple docstring"""
lowercase_ : Union[str, Any] = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 7 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """MobileNetV1Config"""
# Base docstring
SCREAMING_SNAKE_CASE__ : int = """google/mobilenet_v1_1.0_224"""
SCREAMING_SNAKE_CASE__ : List[Any] = [1, 10_24, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ : Tuple = """google/mobilenet_v1_1.0_224"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ : Tuple = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowercase ( snake_case, snake_case, snake_case=None ):
"""simple docstring"""
__magic_name__ :str = {}
if isinstance(snake_case, snake_case ):
__magic_name__ :List[str] = model.mobilenet_va
else:
__magic_name__ :Union[str, Any] = model
__magic_name__ :List[Any] = '''MobilenetV1/Conv2d_0/'''
__magic_name__ :Union[str, Any] = backbone.conv_stem.convolution.weight
__magic_name__ :str = backbone.conv_stem.normalization.bias
__magic_name__ :Tuple = backbone.conv_stem.normalization.weight
__magic_name__ :str = backbone.conv_stem.normalization.running_mean
__magic_name__ :List[Any] = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
__magic_name__ :List[str] = i + 1
__magic_name__ :int = i * 2
__magic_name__ :Union[str, Any] = backbone.layer[pt_index]
__magic_name__ :Any = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
__magic_name__ :Optional[int] = pointer.convolution.weight
__magic_name__ :Union[str, Any] = pointer.normalization.bias
__magic_name__ :Dict = pointer.normalization.weight
__magic_name__ :Dict = pointer.normalization.running_mean
__magic_name__ :List[str] = pointer.normalization.running_var
__magic_name__ :Tuple = backbone.layer[pt_index + 1]
__magic_name__ :Any = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
__magic_name__ :Tuple = pointer.convolution.weight
__magic_name__ :int = pointer.normalization.bias
__magic_name__ :int = pointer.normalization.weight
__magic_name__ :Dict = pointer.normalization.running_mean
__magic_name__ :int = pointer.normalization.running_var
if isinstance(snake_case, snake_case ):
__magic_name__ :Optional[int] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
__magic_name__ :Optional[int] = model.classifier.weight
__magic_name__ :List[Any] = model.classifier.bias
return tf_to_pt_map
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
__magic_name__ :Optional[Any] = tf.train.list_variables(snake_case )
__magic_name__ :Optional[Any] = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
__magic_name__ :Optional[Any] = tf.train.load_variable(snake_case, snake_case )
__magic_name__ :str = array
# Build TF to PyTorch weights loading map
__magic_name__ :Any = _build_tf_to_pytorch_map(snake_case, snake_case, snake_case )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
__magic_name__ :List[Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
__magic_name__ :Any = np.transpose(snake_case, (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
__magic_name__ :Dict = array.squeeze().transpose()
else:
__magic_name__ :Optional[Any] = np.transpose(snake_case, (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
__magic_name__ :List[str] = torch.from_numpy(snake_case )
tf_weights.pop(snake_case, snake_case )
tf_weights.pop(name + '''/RMSProp''', snake_case )
tf_weights.pop(name + '''/RMSProp_1''', snake_case )
tf_weights.pop(name + '''/ExponentialMovingAverage''', snake_case )
logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
return model
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :int = features.shape[-2:]
__magic_name__ , __magic_name__ :Optional[Any] = conv_layer.stride
__magic_name__ , __magic_name__ :Dict = conv_layer.kernel_size
if in_height % stride_height == 0:
__magic_name__ :Optional[int] = max(kernel_height - stride_height, 0 )
else:
__magic_name__ :List[Any] = max(kernel_height - (in_height % stride_height), 0 )
if in_width % stride_width == 0:
__magic_name__ :Any = max(kernel_width - stride_width, 0 )
else:
__magic_name__ :str = max(kernel_width - (in_width % stride_width), 0 )
__magic_name__ :Tuple = pad_along_width // 2
__magic_name__ :Any = pad_along_width - pad_left
__magic_name__ :Union[str, Any] = pad_along_height // 2
__magic_name__ :str = pad_along_height - pad_top
__magic_name__ :Optional[Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(snake_case, snake_case, '''constant''', 0.0 )
class lowerCamelCase_ ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = 1 , __lowerCAmelCase = False , __lowerCAmelCase = True , __lowerCAmelCase = True , ):
"""simple docstring"""
super().__init__()
__magic_name__ :List[str] = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
__magic_name__ :Union[str, Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__magic_name__ :Union[str, Any] = nn.Convad(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=__lowerCAmelCase , groups=__lowerCAmelCase , bias=__lowerCAmelCase , padding_mode='''zeros''' , )
if use_normalization:
__magic_name__ :Tuple = nn.BatchNormad(
num_features=__lowerCAmelCase , eps=config.layer_norm_eps , momentum=0.9997 , affine=__lowerCAmelCase , track_running_stats=__lowerCAmelCase , )
else:
__magic_name__ :Optional[Any] = None
if use_activation:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __lowerCAmelCase ):
__magic_name__ :Any = ACTaFN[config.hidden_act]
else:
__magic_name__ :Union[str, Any] = config.hidden_act
else:
__magic_name__ :List[Any] = None
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.config.tf_padding:
__magic_name__ :Optional[Any] = apply_tf_padding(__lowerCAmelCase , self.convolution )
__magic_name__ :str = self.convolution(__lowerCAmelCase )
if self.normalization is not None:
__magic_name__ :Optional[int] = self.normalization(__lowerCAmelCase )
if self.activation is not None:
__magic_name__ :int = self.activation(__lowerCAmelCase )
return features
class lowerCamelCase_ ( lowerCamelCase ):
a__ = MobileNetVaConfig
a__ = load_tf_weights_in_mobilenet_va
a__ = '''mobilenet_v1'''
a__ = '''pixel_values'''
a__ = False
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__lowerCAmelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE__ : str = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ : str = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , lowerCamelCase , )
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = True ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = config
__magic_name__ :Optional[Any] = 3_2
__magic_name__ :List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
__magic_name__ :List[Any] = MobileNetVaConvLayer(
__lowerCAmelCase , in_channels=config.num_channels , out_channels=__lowerCAmelCase , kernel_size=3 , stride=2 , )
__magic_name__ :Dict = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__magic_name__ :Dict = nn.ModuleList()
for i in range(1_3 ):
__magic_name__ :Optional[int] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__magic_name__ :Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__lowerCAmelCase , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=3 , stride=strides[i] , groups=__lowerCAmelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
__lowerCAmelCase , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=1 , ) )
__magic_name__ :Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__ :Dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
__magic_name__ :str = self.conv_stem(__lowerCAmelCase )
__magic_name__ :Optional[int] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__magic_name__ :int = layer_module(__lowerCAmelCase )
if output_hidden_states:
__magic_name__ :List[str] = all_hidden_states + (hidden_states,)
__magic_name__ :Tuple = hidden_states
if self.pooler is not None:
__magic_name__ :Optional[int] = torch.flatten(self.pooler(__lowerCAmelCase ) , start_dim=1 )
else:
__magic_name__ :Optional[int] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=__lowerCAmelCase , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowerCamelCase , )
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
__magic_name__ :Optional[Any] = config.num_labels
__magic_name__ :List[str] = MobileNetVaModel(__lowerCAmelCase )
__magic_name__ :Dict = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__magic_name__ :int = nn.Dropout(config.classifier_dropout_prob , inplace=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = nn.Linear(__lowerCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :Tuple = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__ :List[str] = self.mobilenet_va(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
__magic_name__ :str = outputs.pooler_output if return_dict else outputs[1]
__magic_name__ :Any = self.classifier(self.dropout(__lowerCAmelCase ) )
__magic_name__ :Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__magic_name__ :List[str] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__magic_name__ :Union[str, Any] = '''single_label_classification'''
else:
__magic_name__ :Optional[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
__magic_name__ :int = MSELoss()
if self.num_labels == 1:
__magic_name__ :Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__magic_name__ :str = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
__magic_name__ :Optional[Any] = CrossEntropyLoss()
__magic_name__ :int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__magic_name__ :Optional[Any] = BCEWithLogitsLoss()
__magic_name__ :Union[str, Any] = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
__magic_name__ :Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states , )
| 0 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = CanineTokenizer
__UpperCAmelCase : int = False
def _UpperCamelCase ( self ):
super().setUp()
lowerCamelCase_ : int = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
return CanineTokenizer.from_pretrained("google/canine-s" )
def _UpperCamelCase ( self , **a_ ):
lowerCamelCase_ : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
lowerCamelCase_ : Dict = 1024
return tokenizer
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.canine_tokenizer
lowerCamelCase_ : str = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
lowerCamelCase_ : Dict = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
lowerCamelCase_ : List[Any] = tokenizer(a_ , padding=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
lowerCamelCase_ : List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(a_ , a_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = self.canine_tokenizer
lowerCamelCase_ : Tuple = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
lowerCamelCase_ : Union[str, Any] = tokenizer(a_ , padding=a_ , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , a_ )
self.assertIn("attention_mask" , a_ )
self.assertIn("token_type_ids" , a_ )
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.canine_tokenizer
lowerCamelCase_ : Tuple = [
"What's the weater?",
"It's about 25 degrees.",
]
lowerCamelCase_ : Optional[Any] = tokenizer(
text_target=a_ , max_length=32 , padding="max_length" , truncation=a_ , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _UpperCamelCase ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCamelCase_ : Dict = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : List[Any] = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
shutil.rmtree(a_ )
lowerCamelCase_ : List[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : List[Any] = tempfile.mkdtemp()
lowerCamelCase_ : Tuple = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Dict = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ : List[str] = chr(0Xe007 )
additional_special_tokens.append(a_ )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Any = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : Any = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
self.assertIn(a_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ : int = tokenizer.__class__.from_pretrained(a_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ ,lowerCamelCase_ : str = self.get_clean_sequence(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Tuple = 0Xe005
lowerCamelCase_ : Dict = chr(a_ )
tokenizer.add_special_tokens({"cls_token": special_token} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(len(a_ ) , 1 )
lowerCamelCase_ : List[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=a_ )
lowerCamelCase_ : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(a_ , input_encoded + special_token_id )
lowerCamelCase_ : Optional[int] = tokenizer.decode(a_ , skip_special_tokens=a_ )
self.assertTrue(special_token not in decoded )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Optional[int] = chr(0Xe005 )
lowerCamelCase_ : str = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=a_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
lowerCamelCase_ : Tuple = tokenizer.tokenize(a_ )
lowerCamelCase_ : List[Any] = tokenizer.tokenize(a_ )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(token_a[0] , a_ )
self.assertEqual(token_a[0] , a_ )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ : List[str] = 0Xe006
lowerCamelCase_ : Any = chr(a_ )
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(a_ )
tokenizer.from_pretrained(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : List[Any] = json.load(a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : int = json.load(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Any = 0Xe006
lowerCamelCase_ : List[Any] = chr(a_ )
lowerCamelCase_ : Any = [new_token_a]
lowerCamelCase_ : Optional[Any] = [new_token_a]
with open(os.path.join(a_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ : str = tokenizer_class.from_pretrained(a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase_ : Optional[int] = 0Xe007
lowerCamelCase_ : List[str] = chr(a_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ : int = [AddedToken(a_ , lstrip=a_ )]
lowerCamelCase_ : Dict = tokenizer_class.from_pretrained(
a_ , additional_special_tokens=a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Union[str, Any] = "hello world"
if self.space_between_special_tokens:
lowerCamelCase_ : int = "[CLS] hello world [SEP]"
else:
lowerCamelCase_ : int = input
lowerCamelCase_ : Optional[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.decode(a_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(a_ , [output, output.lower()] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Tuple = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowerCamelCase_ : Optional[int] = "a"
lowerCamelCase_ : Dict = ord(a_ )
for attr in attributes_list:
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [] )
lowerCamelCase_ : Optional[int] = 0Xe006
lowerCamelCase_ : List[str] = chr(a_ )
setattr(a_ , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
| 250 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
set_seed(770)
lowerCamelCase__ : Union[str, Any] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
lowerCamelCase__ : List[str] = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
lowerCamelCase__ : Optional[Any] = os.path.dirname(os.path.abspath(__file__))
lowerCamelCase__ : int = os.path.join(os.path.expanduser("~"), ".cache")
lowerCamelCase__ : Dict = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def __A ( a_ : List[Any] , a_ : Dict=False )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = model_type
if use_small:
key += "_small"
return os.path.join(a_ , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def __A ( a_ : List[Any] , a_ : Any )-> List[Any]:
'''simple docstring'''
os.makedirs(a_ , exist_ok=a_ )
hf_hub_download(repo_id=a_ , filename=a_ , local_dir=a_ )
def __A ( a_ : Dict , a_ : int , a_ : Dict=False , a_ : Dict="text" )-> str:
'''simple docstring'''
if model_type == "text":
SCREAMING_SNAKE_CASE : List[Any] = BarkSemanticModel
SCREAMING_SNAKE_CASE : int = BarkSemanticConfig
SCREAMING_SNAKE_CASE : Any = BarkSemanticGenerationConfig
elif model_type == "coarse":
SCREAMING_SNAKE_CASE : Union[str, Any] = BarkCoarseModel
SCREAMING_SNAKE_CASE : Optional[int] = BarkCoarseConfig
SCREAMING_SNAKE_CASE : str = BarkCoarseGenerationConfig
elif model_type == "fine":
SCREAMING_SNAKE_CASE : Dict = BarkFineModel
SCREAMING_SNAKE_CASE : Optional[Any] = BarkFineConfig
SCREAMING_SNAKE_CASE : str = BarkFineGenerationConfig
else:
raise NotImplementedError()
SCREAMING_SNAKE_CASE : Any = F"{model_type}_small" if use_small else model_type
SCREAMING_SNAKE_CASE : List[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(a_ ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
SCREAMING_SNAKE_CASE : List[Any] = torch.load(a_ , map_location=a_ )
# this is a hack
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
SCREAMING_SNAKE_CASE : Optional[Any] = model_args['vocab_size']
SCREAMING_SNAKE_CASE : Optional[Any] = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
SCREAMING_SNAKE_CASE : Any = model_args.pop('''n_head''' )
SCREAMING_SNAKE_CASE : Tuple = model_args.pop('''n_embd''' )
SCREAMING_SNAKE_CASE : int = model_args.pop('''n_layer''' )
SCREAMING_SNAKE_CASE : str = ConfigClass(**checkpoint['''model_args'''] )
SCREAMING_SNAKE_CASE : List[str] = ModelClass(config=a_ )
SCREAMING_SNAKE_CASE : List[Any] = GenerationConfigClass()
SCREAMING_SNAKE_CASE : Optional[Any] = model_generation_config
SCREAMING_SNAKE_CASE : int = checkpoint['model']
# fixup checkpoint
SCREAMING_SNAKE_CASE : Optional[Any] = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(a_ ):
# replace part of the key with corresponding layer name in HF implementation
SCREAMING_SNAKE_CASE : Union[str, Any] = k[len(a_ ) :]
for old_layer_name in new_layer_name_dict:
SCREAMING_SNAKE_CASE : List[Any] = new_k.replace(a_ , new_layer_name_dict[old_layer_name] )
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = set(state_dict.keys() ) - set(model.state_dict().keys() )
SCREAMING_SNAKE_CASE : List[str] = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
SCREAMING_SNAKE_CASE : Union[str, Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
SCREAMING_SNAKE_CASE : Optional[Any] = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(a_ ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(a_ ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(a_ , strict=a_ )
SCREAMING_SNAKE_CASE : int = model.num_parameters(exclude_embeddings=a_ )
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['best_val_loss'].item()
logger.info(F"model loaded: {round(n_params/1E6 , 1 )}M params, {round(a_ , 3 )} loss" )
model.eval()
model.to(a_ )
del checkpoint, state_dict
return model
def __A ( a_ : Optional[int] , a_ : Optional[int]=False , a_ : Union[str, Any]="text" )-> Dict:
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
SCREAMING_SNAKE_CASE : List[str] = 'cpu' # do conversion on cpu
SCREAMING_SNAKE_CASE : str = _get_ckpt_path(a_ , use_small=a_ )
SCREAMING_SNAKE_CASE : List[Any] = _load_model(a_ , a_ , model_type=a_ , use_small=a_ )
# load bark initial model
SCREAMING_SNAKE_CASE : Optional[int] = _bark_load_model(a_ , '''cpu''' , model_type=a_ , use_small=a_ )
if model_type == "text":
SCREAMING_SNAKE_CASE : str = bark_model['model']
if model.num_parameters(exclude_embeddings=a_ ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
SCREAMING_SNAKE_CASE : Optional[Any] = 5
SCREAMING_SNAKE_CASE : Optional[Any] = 10
if model_type in ["text", "coarse"]:
SCREAMING_SNAKE_CASE : Any = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = bark_model(a_ )[0]
SCREAMING_SNAKE_CASE : Optional[int] = model(a_ )
# take last logits
SCREAMING_SNAKE_CASE : List[Any] = output_new_model_total.logits[:, [-1], :]
else:
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : List[str] = 8
SCREAMING_SNAKE_CASE : str = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = bark_model(a_ , a_ )
SCREAMING_SNAKE_CASE : List[str] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
def __A ( a_ : List[str] , a_ : Dict , a_ : Optional[int] , a_ : int , a_ : List[str] , a_ : str , )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = os.path.join(a_ , a_ )
SCREAMING_SNAKE_CASE : List[Any] = BarkSemanticConfig.from_pretrained(os.path.join(a_ , '''config.json''' ) )
SCREAMING_SNAKE_CASE : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(a_ , '''config.json''' ) )
SCREAMING_SNAKE_CASE : Any = BarkFineConfig.from_pretrained(os.path.join(a_ , '''config.json''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
SCREAMING_SNAKE_CASE : List[str] = BarkSemanticModel.from_pretrained(a_ )
SCREAMING_SNAKE_CASE : Tuple = BarkCoarseModel.from_pretrained(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = BarkFineModel.from_pretrained(a_ )
SCREAMING_SNAKE_CASE : Optional[int] = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
SCREAMING_SNAKE_CASE : List[Any] = BarkConfig.from_sub_model_configs(
a_ , a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
SCREAMING_SNAKE_CASE : Union[str, Any] = BarkModel(a_ )
SCREAMING_SNAKE_CASE : List[Any] = semantic
SCREAMING_SNAKE_CASE : int = coarseAcoustic
SCREAMING_SNAKE_CASE : Union[str, Any] = fineAcoustic
SCREAMING_SNAKE_CASE : Any = codec
SCREAMING_SNAKE_CASE : Dict = bark_generation_config
Path(a_ ).mkdir(exist_ok=a_ )
bark.save_pretrained(a_ , repo_id=a_ , push_to_hub=a_ )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
lowerCamelCase__ : Optional[int] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 716 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
| 18 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowerCAmelCase = "pt"
elif is_tf_available():
__lowerCAmelCase = "tf"
else:
__lowerCAmelCase = "jax"
class __SCREAMING_SNAKE_CASE (__A , unittest.TestCase ):
"""simple docstring"""
_a : List[Any] = ByTaTokenizer
_a : Optional[Any] = False
def _a ( self ):
"""simple docstring"""
super().setUp()
a_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _a ( self ):
"""simple docstring"""
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def _a ( self , **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _a ( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=20 , UpperCamelCase__=5 ):
"""simple docstring"""
a_ = []
for i in range(len(UpperCamelCase__ ) ):
try:
a_ = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
a_ = list(filter(lambda UpperCamelCase__ : re.match(R'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase__ ) )
a_ = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
a_ = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
a_ = toks + toks
# toks_str = [t[1] for t in toks]
a_ = [t[0] for t in toks]
# Ensure consistency
a_ = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
a_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
a_ = ' ' + output_txt
a_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def _a ( self ):
"""simple docstring"""
a_ = self.ta_base_tokenizer
a_ = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
a_ = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def _a ( self ):
"""simple docstring"""
a_ = self.ta_base_tokenizer
a_ = 'Unicode €.'
a_ = tokenizer(UpperCamelCase__ )
a_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase__ )
# decoding
a_ = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , 'Unicode €.</s>' )
a_ = tokenizer('e è é ê ë' )
a_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase__ )
# decoding
a_ = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def _a ( self ):
"""simple docstring"""
a_ = self.ta_base_tokenizer
a_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
a_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
a_ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
a_ = list(batch.input_ids.numpy()[0] )
else:
a_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _a ( self ):
"""simple docstring"""
a_ = self.ta_base_tokenizer
a_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a_ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase__ )
self.assertIn('attention_mask' , UpperCamelCase__ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase__ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
a_ = self.ta_base_tokenizer
a_ = [
'Summary of the text.',
'Another summary.',
]
a_ = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding='max_length' , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _a ( self ):
"""simple docstring"""
a_ = self.ta_base_tokenizer
a_ = ['A long paragraph for summarization. </s>']
a_ = ['Summary of the text. </s>']
# fmt: off
a_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
a_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
a_ = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase__ , batch['labels'][0] )
def _a ( self ):
"""simple docstring"""
a_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a_ = tempfile.mkdtemp()
a_ = ' He is very happy, UNwant\u00E9d,running'
a_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
a_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
a_ = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
a_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a_ = tempfile.mkdtemp()
a_ = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
a_ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
a_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
a_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
a_ = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
a_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
a_ = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
a_ = json.load(UpperCamelCase__ )
a_ = [f'<extra_id_{i}>' for i in range(125 )]
a_ = added_tokens_extra_ids + [
'an_additional_special_token'
]
a_ = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a_ = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a_ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase__ )]
a_ = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _a ( self ):
"""simple docstring"""
a_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
a_ = tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def _a ( self ):
"""simple docstring"""
pass
def _a ( self ):
"""simple docstring"""
pass
def _a ( self ):
"""simple docstring"""
pass
def _a ( self ):
"""simple docstring"""
pass
def _a ( self ):
"""simple docstring"""
a_ = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
a_ = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
a_ = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
a_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
a_ = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
a_ = 0
a_ = tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + '_id' , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + '_id' ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + '_id' , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + '_id' ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 536 |
'''simple docstring'''
import math
import qiskit
def __UpperCamelCase ( lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : int = 1 ):
"""simple docstring"""
if (
isinstance(lowercase_ , lowercase_ )
or isinstance(lowercase_ , lowercase_ )
or isinstance(lowercase_ , lowercase_ )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(lowercase_ ) != input_a)
or (math.floor(lowercase_ ) != input_a)
or (math.floor(lowercase_ ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
a_ = qiskit.QuantumRegister(4 , 'qr' )
a_ = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
a_ = [input_a, input_a, carry_in]
a_ = qiskit.QuantumCircuit(lowercase_ , lowercase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowercase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowercase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowercase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowercase_ ) # measure the last two qbits
a_ = qiskit.Aer.get_backend('aer_simulator' )
a_ = qiskit.execute(lowercase_ , lowercase_ , shots=1_000 )
return job.result().get_counts(lowercase_ )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 536 | 1 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , *snake_case , **snake_case) -> Optional[int]:
'''simple docstring'''
super().__init__(*snake_case , **snake_case)
def lowerCAmelCase ( self , snake_case , snake_case) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int =deque([]) if self.values[key] is None else self.values[key]
self.values[key].appendleft(snake_case)
_UpperCAmelCase : List[Any] =self.values[key]
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return (
sum(self.charge_factor - len(snake_case) for slot in self.values)
/ self.size_table
* self.charge_factor
)
def lowerCAmelCase ( self , snake_case , snake_case=None) -> int:
'''simple docstring'''
if not (
len(self.values[key]) == self.charge_factor and self.values.count(snake_case) == 0
):
return key
return super()._collision_resolution(snake_case , snake_case)
| 331 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =None
UpperCAmelCase =BloomTokenizerFast
UpperCAmelCase =BloomTokenizerFast
UpperCAmelCase =True
UpperCAmelCase =False
UpperCAmelCase ="tokenizer_file"
UpperCAmelCase ={"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
super().setUp()
_UpperCAmelCase : Union[str, Any] =BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
tokenizer.save_pretrained(self.tmpdirname)
def lowerCAmelCase ( self , **snake_case) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case)
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =self.get_rust_tokenizer()
_UpperCAmelCase : Any =['The quick brown fox</s>', 'jumps over the lazy dog</s>']
_UpperCAmelCase : int =[[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
_UpperCAmelCase : Tuple =tokenizer.batch_encode_plus(snake_case)['input_ids']
self.assertListEqual(snake_case , snake_case)
_UpperCAmelCase : Any =tokenizer.batch_decode(snake_case)
self.assertListEqual(snake_case , snake_case)
def lowerCAmelCase ( self , snake_case=6) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase : Optional[int] =self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCAmelCase : Dict ='This is a simple input'
_UpperCAmelCase : str =['This is a simple input 1', 'This is a simple input 2']
_UpperCAmelCase : List[Any] =('This is a simple input', 'This is a pair')
_UpperCAmelCase : Union[str, Any] =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(snake_case , max_length=snake_case)
tokenizer_r.encode_plus(snake_case , max_length=snake_case)
tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case)
tokenizer_r.encode(snake_case , max_length=snake_case)
tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case)
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding')
_UpperCAmelCase : Tuple =None # Hotfixing padding = None
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length')
# Simple input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length')
# Simple input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length')
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length')
# Pair input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict =self.get_rust_tokenizer()
_UpperCAmelCase : List[Any] =load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case)
_UpperCAmelCase : List[Any] =next(iter(snake_case))['premise'] # pick up one data
_UpperCAmelCase : Union[str, Any] =list(sample_data.values())
_UpperCAmelCase : Dict =list(map(tokenizer.encode , snake_case))
_UpperCAmelCase : Optional[Any] =[tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case) for x in output_tokens]
self.assertListEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
| 331 | 1 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _snake_case :
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=30 ,_snake_case=2 ,_snake_case=3 ,_snake_case=True ,_snake_case=True ,_snake_case=32 ,_snake_case=5 ,_snake_case=4 ,_snake_case=37 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=10 ,_snake_case=0.02 ,_snake_case=3 ,_snake_case=None ,_snake_case=2 ,):
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : List[Any] = image_size
UpperCAmelCase_ : Dict = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : str = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase_ : List[str] = (image_size // patch_size) ** 2
UpperCAmelCase_ : Union[str, Any] = num_patches + 2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : int = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : List[str] = DeiTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Dict = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : int = DeiTForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Dict = model(_snake_case )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : int = DeiTForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Tuple = model(_snake_case )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Optional[int] = self.type_sequence_label_size
UpperCAmelCase_ : List[Any] = DeiTForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Dict = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : Tuple = DeiTForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : List[str] = config_and_inputs
UpperCAmelCase_ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[Any] =(
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__A : str =(
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__A : Optional[int] =False
__A : Optional[Any] =False
__A : List[str] =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = DeiTModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ,hidden_size=37 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_snake_case )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=False ):
UpperCAmelCase_ : str = super()._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : int = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_snake_case )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ : Dict = model_class(_snake_case )
model.to(_snake_case )
model.train()
UpperCAmelCase_ : Any = self._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
UpperCAmelCase_ : Union[str, Any] = model(**_snake_case ).loss
loss.backward()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ : Optional[Any] = model_class(_snake_case )
model.gradient_checkpointing_enable()
model.to(_snake_case )
model.train()
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
UpperCAmelCase_ : Dict = model(**_snake_case ).loss
loss.backward()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_snake_case ),
*get_values(_snake_case ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
UpperCAmelCase_ : Tuple = problem_type["title"]
UpperCAmelCase_ : Dict = problem_type["num_labels"]
UpperCAmelCase_ : int = model_class(_snake_case )
model.to(_snake_case )
model.train()
UpperCAmelCase_ : List[Any] = self._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ : Tuple = inputs["labels"].unsqueeze(1 ).repeat(1 ,problem_type["num_labels"] )
UpperCAmelCase_ : Optional[Any] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_snake_case ) as warning_list:
UpperCAmelCase_ : Dict = model(**_snake_case ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[str] = DeiTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def a__ ( ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case (unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : Union[str, Any] = image_processor(images=_snake_case ,return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_snake_case )
# verify the logits
UpperCAmelCase_ : Optional[int] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,_snake_case )
UpperCAmelCase_ : Tuple = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" ,torch_dtype=torch.floataa ,device_map="auto" )
UpperCAmelCase_ : Optional[Any] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=_snake_case ,return_tensors="pt" )
UpperCAmelCase_ : Union[str, Any] = inputs.pixel_values.to(_snake_case )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(_snake_case )
| 71 |
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE : Union[str, Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
SCREAMING_SNAKE_CASE : List[Any] = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
SCREAMING_SNAKE_CASE : Any = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def A ( self ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def A ( self , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
"""simple docstring"""
a_ : Optional[int] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
a_ : Dict = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
a_ : Optional[Any] = evaluate(dataset=UpperCamelCase_ , predictions=UpperCamelCase_ )
return score
| 419 | 0 |
def _lowerCAmelCase ( _a : int = 10**9 ) -> Optional[Any]:
lowerCAmelCase_ : Union[str, Any] = 1
lowerCAmelCase_ : Dict = 2
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Dict = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowerCAmelCase_ : List[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 720 |
from collections.abc import Generator
from math import sin
def _lowerCAmelCase ( _a : bytes ) -> bytes:
if len(_a ) != 32:
raise ValueError("""Input must be of length 32""" )
lowerCAmelCase_ : Any = B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _lowerCAmelCase ( _a : int ) -> bytes:
if i < 0:
raise ValueError("""Input must be non-negative""" )
lowerCAmelCase_ : Tuple = format(_a , """08x""" )[-8:]
lowerCAmelCase_ : Any = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def _lowerCAmelCase ( _a : bytes ) -> bytes:
lowerCAmelCase_ : Tuple = B""""""
for char in message:
bit_string += format(_a , """08b""" ).encode("""utf-8""" )
lowerCAmelCase_ : Dict = format(len(_a ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_a ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _lowerCAmelCase ( _a : bytes ) -> Generator[list[int], None, None]:
if len(_a ) % 5_12 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0 , len(_a ) , 5_12 ):
lowerCAmelCase_ : int = bit_string[pos : pos + 5_12]
lowerCAmelCase_ : Any = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _lowerCAmelCase ( _a : int ) -> int:
if i < 0:
raise ValueError("""Input must be non-negative""" )
lowerCAmelCase_ : List[str] = format(_a , """032b""" )
lowerCAmelCase_ : Optional[int] = """"""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_a , 2 )
def _lowerCAmelCase ( _a : int , _a : int ) -> int:
return (a + b) % 2**32
def _lowerCAmelCase ( _a : int , _a : int ) -> int:
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _lowerCAmelCase ( _a : bytes ) -> bytes:
lowerCAmelCase_ : Union[str, Any] = preprocess(_a )
lowerCAmelCase_ : Optional[int] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
lowerCAmelCase_ : Tuple = 0X67452301
lowerCAmelCase_ : Optional[int] = 0XEFCDAB89
lowerCAmelCase_ : Tuple = 0X98BADCFE
lowerCAmelCase_ : Tuple = 0X10325476
lowerCAmelCase_ : Dict = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_a ):
lowerCAmelCase_ : int = aa
lowerCAmelCase_ : Any = ba
lowerCAmelCase_ : List[str] = ca
lowerCAmelCase_ : Optional[Any] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
lowerCAmelCase_ : int = d ^ (b & (c ^ d))
lowerCAmelCase_ : List[Any] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
lowerCAmelCase_ : List[Any] = c ^ (d & (b ^ c))
lowerCAmelCase_ : Optional[Any] = (5 * i + 1) % 16
elif i <= 47:
lowerCAmelCase_ : Union[str, Any] = b ^ c ^ d
lowerCAmelCase_ : Optional[Any] = (3 * i + 5) % 16
else:
lowerCAmelCase_ : Any = c ^ (b | not_aa(_a ))
lowerCAmelCase_ : List[str] = (7 * i) % 16
lowerCAmelCase_ : Tuple = (f + a + added_consts[i] + block_words[g]) % 2**32
lowerCAmelCase_ : Tuple = d
lowerCAmelCase_ : Optional[Any] = c
lowerCAmelCase_ : Dict = b
lowerCAmelCase_ : List[Any] = sum_aa(_a , left_rotate_aa(_a , shift_amounts[i] ) )
# Add hashed chunk to running total
lowerCAmelCase_ : Optional[int] = sum_aa(_a , _a )
lowerCAmelCase_ : Optional[int] = sum_aa(_a , _a )
lowerCAmelCase_ : Dict = sum_aa(_a , _a )
lowerCAmelCase_ : Tuple = sum_aa(_a , _a )
lowerCAmelCase_ : int = reformat_hex(_a ) + reformat_hex(_a ) + reformat_hex(_a ) + reformat_hex(_a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 440 | 0 |
def a__ ( A__ = 6_0_0_8_5_1_4_7_5_1_4_3 ):
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = int(A__ )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
SCREAMING_SNAKE_CASE_ : Tuple = i
while n % i == 0:
SCREAMING_SNAKE_CASE_ : Tuple = n // i
i += 1
return int(A__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 101 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ : Dict = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : int = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 442 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default="cifar10", metadata={"help": "Name of a dataset from the datasets package"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The column name of the images in the files."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "A folder containing the training data."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "A folder containing the validation data."} )
UpperCAmelCase = field(
default=0.1_5, metadata={"help": "Percent to split off of train for validation."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
}, )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = {}
if self.train_dir is not None:
_UpperCamelCase = self.train_dir
if self.validation_dir is not None:
_UpperCamelCase = self.validation_dir
_UpperCamelCase = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
UpperCAmelCase = field(
default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Name or path of preprocessor config."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
}, )
UpperCAmelCase = field(
default=0.7_5, metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = field(
default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def _snake_case ( __snake_case ):
_UpperCamelCase = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
_UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCamelCase = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0:
_UpperCamelCase = ds['''train'''].train_test_split(data_args.train_val_split )
_UpperCamelCase = split['''train''']
_UpperCamelCase = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case )
elif model_args.model_name_or_path:
_UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
_UpperCamelCase = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case )
elif model_args.model_name_or_path:
_UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
_UpperCamelCase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_UpperCamelCase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
_UpperCamelCase = ViTMAEForPreTraining(__snake_case )
if training_args.do_train:
_UpperCamelCase = ds['''train'''].column_names
else:
_UpperCamelCase = ds['''validation'''].column_names
if data_args.image_column_name is not None:
_UpperCamelCase = data_args.image_column_name
elif "image" in column_names:
_UpperCamelCase = '''image'''
elif "img" in column_names:
_UpperCamelCase = '''img'''
else:
_UpperCamelCase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_UpperCamelCase = image_processor.size['''shortest_edge''']
else:
_UpperCamelCase = (image_processor.size['''height'''], image_processor.size['''width'''])
_UpperCamelCase = Compose(
[
Lambda(lambda __snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__snake_case ):
_UpperCamelCase = [transforms(__snake_case ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
_UpperCamelCase = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
_UpperCamelCase = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__snake_case )
# Compute absolute learning rate
_UpperCamelCase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
_UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCamelCase = trainer.evaluate()
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
# Write model card and (optionally) push to hub
_UpperCamelCase = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 71 | import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case , __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case , __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_UpperCamelCase = None
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCamelCase = True
elif name.split('''.''' )[0] == "proj":
_UpperCamelCase = fairseq_model.proj
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
def _snake_case ( __snake_case ):
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
def _snake_case ( __snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [line.split(''' ''' )[0] for line in lines]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case )
_UpperCamelCase = SpeechaTextaConfig.from_pretrained(
__snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
_UpperCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
_UpperCamelCase = WavaVecaModel(__snake_case )
_UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case )
_UpperCamelCase = SpeechaTextaForCausalLM(__snake_case )
_UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
_UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
_UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case )
_UpperCamelCase = False
# add projection layer
_UpperCamelCase = nn.Parameter(projection_layer.weight )
_UpperCamelCase = nn.Parameter(projection_layer.bias )
_UpperCamelCase = create_vocab_dict(__snake_case )
with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
_UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) )
tokenizer.save_pretrained(__snake_case )
_UpperCamelCase = hf_wavavec.config.to_dict()
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = '''speech_to_text_2'''
_UpperCamelCase = '''wav2vec2'''
_UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 71 | 1 |
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
def count_of_possible_combinations(lowercase__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
lowercase__ , lowercase__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__lowercase= sum(
count_of_possible_combinations_with_dp_array(target - item , lowercase__ )
for item in array )
__lowercase= answer
return answer
__lowercase= [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(lowercase__ , lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [0] * (target + 1)
__lowercase= 1
for i in range(1 , target + 1 ):
for j in range(lowercase__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = 3
lowerCAmelCase = 5
lowerCAmelCase = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 230 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''xlm-roberta-xl'''
def __init__(self , lowerCAmelCase=2_5_0_8_8_0 , lowerCAmelCase=2_5_6_0 , lowerCAmelCase=3_6 , lowerCAmelCase=3_2 , lowerCAmelCase=1_0_2_4_0 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_4 , lowerCAmelCase=1 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-05 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= position_embedding_type
__lowercase= use_cache
__lowercase= classifier_dropout
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 230 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a = get_logger()
a = None
class UpperCamelCase__ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : Optional[int] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Any ):
'''simple docstring'''
super().__init__(features=UpperCamelCase__ )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(UpperCamelCase__ )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
lowercase_ = device if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
lowercase_ = str(jax.devices()[0] )
lowercase_ = jnp_array_kwargs
@staticmethod
def UpperCAmelCase__ ( ):
'''simple docstring'''
import jax
return {str(UpperCamelCase__ ): device for device in jax.devices()}
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and column:
if all(
isinstance(UpperCamelCase__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCamelCase__ , axis=0 )
return column
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(UpperCamelCase__ , (str, bytes, type(UpperCamelCase__ )) ):
return value
elif isinstance(UpperCamelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowercase_ = {}
if isinstance(UpperCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
lowercase_ = {"""dtype""": jnp.intaa}
else:
lowercase_ = {"""dtype""": jnp.intaa}
elif isinstance(UpperCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowercase_ = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
lowercase_ = np.asarray(UpperCamelCase__ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCamelCase__ , **{**default_dtype, **self.jnp_array_kwargs} )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCamelCase__ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCamelCase__ , """__array__""" ) and not isinstance(UpperCamelCase__ , jax.Array ):
lowercase_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase__ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase__ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase__ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : dict ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , UpperCamelCase__ , map_list=UpperCamelCase__ )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : pa.Table ):
'''simple docstring'''
lowercase_ = self.numpy_arrow_extractor().extract_row(UpperCamelCase__ )
lowercase_ = self.python_features_decoder.decode_row(UpperCamelCase__ )
return self.recursive_tensorize(UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : pa.Table ):
'''simple docstring'''
lowercase_ = self.numpy_arrow_extractor().extract_column(UpperCamelCase__ )
lowercase_ = self.python_features_decoder.decode_column(UpperCamelCase__ , pa_table.column_names[0] )
lowercase_ = self.recursive_tensorize(UpperCamelCase__ )
lowercase_ = self._consolidate(UpperCamelCase__ )
return column
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : pa.Table ):
'''simple docstring'''
lowercase_ = self.numpy_arrow_extractor().extract_batch(UpperCamelCase__ )
lowercase_ = self.python_features_decoder.decode_batch(UpperCamelCase__ )
lowercase_ = self.recursive_tensorize(UpperCamelCase__ )
for column_name in batch:
lowercase_ = self._consolidate(batch[column_name] )
return batch
| 711 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = (UnCLIPScheduler,)
def UpperCAmelCase__ ( self : int , **UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ = {
"""num_train_timesteps""": 1_000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**UpperCamelCase__ )
return config
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowercase_ = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(variance_type="""learned_range""" )
lowercase_ = scheduler_class(**UpperCamelCase__ )
lowercase_ = 0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1e-5
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCamelCase__ )
lowercase_ = scheduler.timesteps
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
lowercase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(25 )
lowercase_ = scheduler.timesteps
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
if i + 1 == timesteps.shape[0]:
lowercase_ = None
else:
lowercase_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase_ = scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
| 650 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int=() , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : List[str]="no" , _UpperCamelCase : str="29500" ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , _UpperCamelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE = 8
SCREAMING_SNAKE_CASE = PrepareForLaunch(_UpperCamelCase , distributed_type='TPU' )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*_UpperCamelCase )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCamelCase , master_addr='127.0.01' , master_port=_UpperCamelCase , mixed_precision=_UpperCamelCase ):
SCREAMING_SNAKE_CASE = PrepareForLaunch(_UpperCamelCase , distributed_type='MULTI_GPU' )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=() , _UpperCamelCase : Optional[Any]=2 ) -> str:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCamelCase , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE = PrepareForLaunch(_UpperCamelCase , debug=_UpperCamelCase )
start_processes(_UpperCamelCase , args=_UpperCamelCase , nprocs=_UpperCamelCase , start_method='fork' )
| 439 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def __lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 9
SCREAMING_SNAKE_CASE = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
SCREAMING_SNAKE_CASE = kruskal(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(_UpperCamelCase ) == sorted(_UpperCamelCase )
| 439 | 1 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 706 |
import csv
import tweepy
# Twitter API credentials
__UpperCAmelCase = ""
__UpperCAmelCase = ""
__UpperCAmelCase = ""
__UpperCAmelCase = ""
def A__ ( __lowerCamelCase ):
# authorize twitter, initialize tweepy
SCREAMING_SNAKE_CASE_ = tweepy.OAuthHandler(__lowerCamelCase, __lowerCamelCase )
auth.set_access_token(__lowerCamelCase, __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = tweepy.API(__lowerCamelCase )
# initialize a list to hold all the tweepy Tweets
SCREAMING_SNAKE_CASE_ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
SCREAMING_SNAKE_CASE_ = api.user_timeline(screen_name=__lowerCamelCase, count=2_00 )
# save most recent tweets
alltweets.extend(__lowerCamelCase )
# save the id of the oldest tweet less one
SCREAMING_SNAKE_CASE_ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__lowerCamelCase ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
SCREAMING_SNAKE_CASE_ = api.user_timeline(
screen_name=__lowerCamelCase, count=2_00, max_id=__lowerCamelCase )
# save most recent tweets
alltweets.extend(__lowerCamelCase )
# update the id of the oldest tweet less one
SCREAMING_SNAKE_CASE_ = alltweets[-1].id - 1
print(F'''...{len(__lowerCamelCase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
SCREAMING_SNAKE_CASE_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''', '''w''' ) as f:
SCREAMING_SNAKE_CASE_ = csv.writer(__lowerCamelCase )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__lowerCamelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 597 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowercase : Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowercase : str = [0, 25, 50]
lowercase : Union[str, Any] = [25, 50, 75]
lowercase : Tuple = fuzz.membership.trimf(X, abca)
lowercase : List[str] = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowercase : Dict = np.ones(75)
lowercase : Union[str, Any] = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
lowercase : Dict = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowercase : Tuple = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
lowercase : List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowercase : int = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowercase : Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowercase : int = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
lowercase : str = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
lowercase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 568 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase : int = None
lowercase : str = logging.get_logger(__name__)
lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase : Dict = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
lowercase : Dict = {
'''google/rembert''': 2_56,
}
lowercase : Dict = '''▁'''
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : Any = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = RemBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : Optional[int] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
snake_case_ : Any = do_lower_case
snake_case_ : Dict = remove_space
snake_case_ : Optional[Any] = keep_accents
snake_case_ : Tuple = vocab_file
snake_case_ : Union[str, Any] = False if not self.vocab_file else True
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
snake_case_ : Dict = [self.sep_token_id]
snake_case_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("Vocabulary path ({}) should be a directory".format(_SCREAMING_SNAKE_CASE ) )
return
snake_case_ : str = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 568 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase :int = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class _a( __A , unittest.TestCase ):
lowerCamelCase__ :Union[str, Any] = BartphoTokenizer
lowerCamelCase__ :Optional[int] = False
lowerCamelCase__ :Tuple = True
def lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
_snake_case : Any = ["▁This", "▁is", "▁a", "▁t", "est"]
_snake_case : int = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
_snake_case : int = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
_snake_case : Optional[int] = BartphoTokenizer(__snake_case , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self , **__snake_case ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase ( self , __snake_case ) -> int:
'''simple docstring'''
_snake_case : List[str] = "This is a là test"
_snake_case : List[Any] = "This is a<unk><unk> test"
return input_text, output_text
def lowercase ( self ) -> int:
'''simple docstring'''
_snake_case : List[str] = BartphoTokenizer(__snake_case , self.monolingual_vocab_file , **self.special_tokens_map )
_snake_case : List[Any] = "This is a là test"
_snake_case : str = "▁This ▁is ▁a ▁l à ▁t est".split()
_snake_case : Optional[Any] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
_snake_case : Optional[Any] = tokens + [tokenizer.unk_token]
_snake_case : List[str] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case ) | 278 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase :str = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase :Optional[int] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
__lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 278 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
A_ = tmp_path / '''cache'''
A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
A_ = tmp_path / '''cache'''
A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A_ = features.copy() if features else default_expected_features
A_ = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = ParquetDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
A_ = tmp_path / '''cache'''
A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A_ = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
A_ = parquet_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
A_ = [parquet_path]
A_ = tmp_path / '''cache'''
A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A_ = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=("train",) ):
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
A_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
A_ = tmp_path / '''cache'''
A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
A_ = tmp_path / '''cache'''
A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A_ = features.copy() if features else default_expected_features
A_ = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if split:
A_ = {split: parquet_path}
else:
A_ = '''train'''
A_ = {'''train''': parquet_path, '''test''': parquet_path}
A_ = tmp_path / '''cache'''
A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A_ = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
A_ = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
A_ = pq.ParquetFile(tmp_path / '''foo.parquet''' )
A_ = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
A_ = str(shared_datadir / '''test_image_rgb.jpg''' )
A_ = {'''image''': [image_path]}
A_ = Features({'''image''': Image()} )
A_ = Dataset.from_dict(UpperCamelCase__ , features=UpperCamelCase__ )
A_ = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
A_ = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
A_ = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ):
assert get_writer_batch_size(UpperCamelCase__ ) == expected | 141 | import random
def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : List[Any] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = [], [], []
for element in data:
if element < pivot:
less.append(UpperCamelCase__ )
elif element > pivot:
greater.append(UpperCamelCase__ )
else:
equal.append(UpperCamelCase__ )
return less, equal, greater
def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : int ):
'''simple docstring'''
if index >= len(UpperCamelCase__ ) or index < 0:
return None
UpperCamelCase__ = items[random.randint(0, len(UpperCamelCase__ ) - 1 )]
UpperCamelCase__ = 0
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = _partition(UpperCamelCase__, UpperCamelCase__ )
UpperCamelCase__ = len(UpperCamelCase__ )
UpperCamelCase__ = len(UpperCamelCase__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(UpperCamelCase__, UpperCamelCase__ )
# must be in larger
else:
return quick_select(UpperCamelCase__, index - (m + count) )
| 240 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
@property
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
return self.get_dummy_input()
@property
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=False , UpperCamelCase_ : Any=False , ) -> List[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = 4
_lowercase : List[str] = 32
_lowercase : Tuple = (32, 32)
_lowercase : Any = torch.manual_seed(0 )
_lowercase : Tuple = torch.device(UpperCAmelCase__ )
_lowercase : int = (batch_size, num_channels) + sizes
_lowercase : Tuple = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ )
_lowercase : List[str] = {'''hidden_states''': hidden_states}
if include_temb:
_lowercase : Optional[int] = 128
_lowercase : Optional[Any] = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase__ , device=UpperCAmelCase__ )
if include_res_hidden_states_tuple:
_lowercase : Optional[Any] = torch.manual_seed(1 )
_lowercase : int = (randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ ),)
if include_encoder_hidden_states:
_lowercase : Dict = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase__ )
if include_skip_sample:
_lowercase : List[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase__ , device=UpperCAmelCase__ )
return dummy_input
def __UpperCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
_lowercase : Union[str, Any] = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_lowercase : Tuple = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Dict ) -> int:
'''simple docstring'''
_lowercase : List[Any] = self.prepare_init_args_and_inputs_for_common()
_lowercase : List[str] = self.block_class(**UpperCAmelCase__ )
unet_block.to(UpperCAmelCase__ )
unet_block.eval()
with torch.no_grad():
_lowercase : Optional[int] = unet_block(**UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_lowercase : Optional[Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
_lowercase : List[str] = output[0, -1, -3:, -3:]
_lowercase : Optional[Any] = torch.tensor(UpperCAmelCase__ ).to(UpperCAmelCase__ )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase__ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_lowercase : Any = self.prepare_init_args_and_inputs_for_common()
_lowercase : Optional[int] = self.block_class(**UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
_lowercase : int = model(**UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_lowercase : Optional[int] = output[0]
_lowercase : Dict = torch.device(UpperCAmelCase__ )
_lowercase : Tuple = randn_tensor(output.shape , device=UpperCAmelCase__ )
_lowercase : Union[str, Any] = torch.nn.functional.mse_loss(UpperCAmelCase__ , UpperCAmelCase__ )
loss.backward()
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.