import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
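
# Example invocation (hypothetical runner names, token and script name, for
# illustration only):
#
#   python check_offline_runners.py --target_runners runner-docker-1,runner-docker-2 --token <GITHUB_TOKEN>
#
# The script writes the offline runners to offline_runners.txt so they can be
# reported on Slack, and raises a ValueError if any target runner is offline.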
# ---------------------------------------------------------------------------
from functools import reduce

N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)

def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
# ---------------------------------------------------------------------------
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate `img` with the affine transform that maps the points `pts1` onto `pts2`."""
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
# ---------------------------------------------------------------------------

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    """Construct an MBART tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """mBART does not make use of token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
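
# A minimal usage sketch (assumes access to the `facebook/mbart-large-en-ro`
# checkpoint referenced above; not executed here):
#
#   tokenizer = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # Because of set_src_lang_special_tokens(), input_ids carry no prefix and end
#   # with [eos_token_id, <en_XX language-code id>].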
# ---------------------------------------------------------------------------
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
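
# Example run (assumes the original DialoGPT `{small,medium,large}_ft.pkl`
# checkpoints were downloaded into the current directory; the script name is
# illustrative):
#
#   python convert_dialogpt_checkpoint.py --dialogpt_path .
#
# Each converted state dict is saved as WEIGHTS_NAME inside ./DialoGPT-<size>.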
# ---------------------------------------------------------------------------
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Compute the u-term u * (u - 1) * ... * (u - p + 1) of the forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """Newton's forward interpolation on user-supplied data points."""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
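
# Worked example of the forward-difference formula above: with n = 4,
# x = [0, 1, 2, 3], first column y = [1, 2, 4, 8] and value = 1.5, we get
# u = (1.5 - 0) / (1 - 0) = 1.5, forward differences 1, 1, 1 along the top row,
# and summ = 1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125.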
# ---------------------------------------------------------------------------
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( lowerCAmelCase__ : Optional[int] ) -> int:
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def lowercase ( lowerCAmelCase__ : Any ) -> Any:
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
__a = metric_id
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Any = [MetricMock(__SCREAMING_SNAKE_CASE ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def __UpperCAmelCase ( self ):
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple ) -> Optional[int]:
if "tmp_path" in args:
__a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(lowerCAmelCase__ , match='''https://huggingface.co/docs/evaluate''' ):
func(*lowerCAmelCase__ )
# ---------------------------------------------------------------------------
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['input_features', 'attention_mask']
def __init__( self , _a=80 , _a=16_000 , _a=0.0 , _a=10 , _a=25 , _a="hamming_window" , _a=3_2768.0 , _a=0.97 , _a=1.0 , _a=True , _a=True , _a=False , **_a , ):
super().__init__(feature_size=_a , sampling_rate=_a , padding_value=_a , **_a )
__a = feature_size
__a = sampling_rate
__a = padding_value
__a = hop_length
__a = win_length
__a = frame_signal_scale
__a = preemphasis_coeff
__a = mel_floor
__a = normalize_means
__a = normalize_vars
__a = win_function
__a = return_attention_mask
__a = win_length * sampling_rate // 1_000
__a = hop_length * sampling_rate // 1_000
__a = optimal_fft_length(self.sample_size )
__a = (self.n_fft // 2) + 1
def __UpperCAmelCase ( self , _a ):
if self.win_function == "hamming_window":
__a = window_function(window_length=self.sample_size , name=self.win_function , periodic=_a )
else:
__a = window_function(window_length=self.sample_size , name=self.win_function )
__a = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a = spectrogram(
one_waveform * self.frame_signal_scale , window=_a , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_a , preemphasis=self.preemphasis_coeff , mel_filters=_a , mel_floor=self.mel_floor , log_mel='''log''' , )
return msfc_features.T
def __UpperCAmelCase ( self , _a , _a , _a ):
# make sure we normalize float32 arrays
if self.normalize_means:
__a = x[:input_length].mean(axis=0 )
__a = np.subtract(_a , _a )
if self.normalize_vars:
__a = x[:input_length].std(axis=0 )
__a = np.divide(_a , _a )
if input_length < x.shape[0]:
__a = padding_value
# make sure array is in float32
__a = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self , _a , _a = None ):
__a = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_a , _a , self.padding_value ) for x, n in zip(_a , _a )]
def __call__( self , _a , _a = False , _a = None , _a = False , _a = None , _a = None , _a = None , _a = None , **_a , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__a = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__a = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a = [np.asarray(_a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
__a = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a = [raw_speech]
# extract fbank features
__a = [self._extract_mfsc_features(_a ) for one_waveform in raw_speech]
# convert into correct format for padding
__a = BatchFeature({'''input_features''': features} )
__a = self.pad(
_a , padding=_a , max_length=_a , truncation=_a , pad_to_multiple_of=_a , return_attention_mask=_a , **_a , )
# make sure list is in array format
__a = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , _a ):
__a = [np.asarray(_a , dtype=np.floataa ) for feature in input_features]
__a = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__a = [np.asarray(_a , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a = (
np.array(_a , dtype=np.intaa )
if self._get_padding_strategies(_a , max_length=_a ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a = self.normalize(
padded_inputs['''input_features'''] , attention_mask=_a )
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(_a )
return padded_inputs
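
# A minimal usage sketch (hypothetical one-second waveform; the sampling rate has
# to match the extractor's configured 16 kHz):
#
#   import numpy as np
#   extractor = MCTCTFeatureExtractor()
#   waveform = np.zeros(16_000, dtype=np.float32)
#   features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#   # features["input_features"] has shape (1, num_frames, feature_size=80)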
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
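
# Instantiating and inspecting a default configuration (a sketch mirroring the
# defaults above):
#
#   configuration = VisualBertConfig()
#   assert configuration.visual_embedding_dim == 512
#   assert configuration.hidden_size == 768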
# ---------------------------------------------------------------------------
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Compute the speed of sound in a fluid from its density and bulk modulus,
    using the Newton-Laplace formula c = sqrt(K / rho)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
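
# Sanity check with approximate values for water at 20 °C (density ~998 kg/m^3,
# bulk modulus ~2.15e9 Pa); the formula above gives roughly 1468 m/s:
#
#   >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))
#   1468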
# ---------------------------------------------------------------------------
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
lowercase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowercase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowercase = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowercase = [0] * args.vocab_size
for k, v in counter.items():
lowercase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
# ---------------------------------------------------------------------------
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
create_all_state(1 , A , A , [] , A )
return result
def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(A , total_number - level + 2 ):
current_list.append(A )
create_all_state(i + 1 , A , level - 1 , A , A )
current_list.pop()
def UpperCAmelCase ( A : list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*A )
if __name__ == "__main__":
lowercase = 4
lowercase = 2
lowercase = generate_all_combinations(n, k)
print_all_state(total_list)
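
# For n = 4 and k = 2 the backtracking above prints each 2-combination of
# {1, 2, 3, 4} on its own line:
#   1 2
#   1 3
#   1 4
#   2 3
#   2 4
#   3 4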
# ---------------------------------------------------------------------------
import inspect
import unittest

from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """An integration test that performs autoregressive prediction of state, action and return
        from a sequence of states, actions and returns."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
# ---------------------------------------------------------------------------

from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and the average turn around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
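
# Hand-checked example of the SRTF logic above: with arrival times [0, 1, 2] and
# burst times [3, 1, 2], process 2 preempts process 1 at t=1 and finishes at t=2;
# process 1 finishes at t=4 and process 3 at t=6, giving waiting times [1, 0, 2]
# and turn-around times [4, 1, 4].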
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ : str =logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **_lowercase ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCamelCase : int = deprecated_arg[3:]
_lowerCamelCase : List[Any] = not kwargs.pop(_lowercase )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
_lowerCamelCase : Dict = kwargs.pop('''tpu_name''' , self.tpu_name )
_lowerCamelCase : Optional[Any] = kwargs.pop('''device_idx''' , self.device_idx )
_lowerCamelCase : Union[str, Any] = kwargs.pop('''eager_mode''' , self.eager_mode )
_lowerCamelCase : int = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**_lowercase )
__snake_case = field(
default=a_ , metadata={"""help""": """Name of TPU"""} , )
__snake_case = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
__snake_case = field(default=a_ , metadata={"""help""": """Benchmark models in eager model."""} )
__snake_case = field(
default=a_ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def a__ ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['''tf'''] )
_lowerCamelCase : List[str] = None
if self.tpu:
try:
if self.tpu_name:
_lowerCamelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCamelCase : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCamelCase : int = None
return tpu
@cached_property
def a__ ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCamelCase : int = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
_lowerCamelCase : str = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
_lowerCamelCase : Tuple = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def a__ ( self ) -> bool:
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def a__ ( self ) -> "tf.distribute.Strategy":
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def a__ ( self ) -> str:
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def a__ ( self ) -> int:
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def a__ ( self ) -> bool:
return self.n_gpu > 0
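
# Sketch of the legacy-flag handling in __init__ above (the `models` field and the
# positive flag names are assumed to come from the BenchmarkArguments base class):
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_inference=True)
#   # The deprecated `no_inference=True` is rewritten to `inference=False` and a
#   # deprecation warning is logged.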
| 558 | """simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=0 ) -> List[Any]:
_lowerCamelCase : Tuple = 1.0 if scale is None else scale
_lowerCamelCase : int = 0.0 if loc is None else loc
super().__init__(_lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowercase )] )
@property
def a__ ( self ) -> Dict:
return self.base_dist.mean * self.scale + self.loc
@property
def a__ ( self ) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def a__ ( self ) -> Union[str, Any]:
return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , **_lowercase ) -> None:
super().__init__(**_lowercase )
_lowerCamelCase : Union[str, Any] = args_dim
_lowerCamelCase : Union[str, Any] = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] )
_lowerCamelCase : str = domain_map
def a__ ( self , _lowercase ) -> Tuple[torch.Tensor]:
_lowerCamelCase : Any = [proj(_lowercase ) for proj in self.proj]
return self.domain_map(*_lowercase )
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
super().__init__()
_lowerCamelCase : Optional[Any] = function
def a__ ( self , _lowercase , *_lowercase ) -> str:
return self.function(_lowercase , *_lowercase )
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = 42
__snake_case = 42
__snake_case = 42
def __init__( self , _lowercase = 1 ) -> None:
_lowerCamelCase : int = dim
_lowerCamelCase : Optional[int] = {k: dim * self.args_dim[k] for k in self.args_dim}
def a__ ( self , _lowercase ) -> Dict:
if self.dim == 1:
return self.distribution_class(*_lowercase )
else:
return Independent(self.distribution_class(*_lowercase ) , 1 )
def a__ ( self , _lowercase , _lowercase = None , _lowercase = None , ) -> Distribution:
_lowerCamelCase : Any = self._base_distribution(_lowercase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim )
@property
def a__ ( self ) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def a__ ( self ) -> int:
return len(self.event_shape )
@property
def a__ ( self ) -> float:
return 0.0
def a__ ( self , _lowercase ) -> nn.Module:
return ParameterProjection(
in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def a__ ( self , *_lowercase ) -> int:
raise NotImplementedError()
@staticmethod
def a__ ( _lowercase ) -> torch.Tensor:
return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = {"df": 1, "loc": 1, "scale": 1}
__snake_case = StudentT
@classmethod
def a__ ( cls , _lowercase , _lowercase , _lowercase ) -> List[Any]:
_lowerCamelCase : int = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
_lowerCamelCase : List[Any] = 2.0 + cls.squareplus(_lowercase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = {"loc": 1, "scale": 1}
__snake_case = Normal
@classmethod
def a__ ( cls , _lowercase , _lowercase ) -> List[Any]:
_lowerCamelCase : str = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = {"total_count": 1, "logits": 1}
__snake_case = NegativeBinomial
@classmethod
def a__ ( cls , _lowercase , _lowercase ) -> int:
_lowerCamelCase : str = cls.squareplus(_lowercase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def a__ ( self , _lowercase ) -> Distribution:
_lowerCamelCase, _lowerCamelCase : int = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_lowercase , logits=_lowercase )
else:
return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 )
def a__ ( self , _lowercase , _lowercase = None , _lowercase = None ) -> Distribution:
_lowerCamelCase, _lowerCamelCase : Optional[int] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 558 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] ={
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """falcon"""
__snake_case = ["""past_key_values"""]
def __init__( self , _lowercase=65024 , _lowercase=4544 , _lowercase=32 , _lowercase=71 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=None , _lowercase=False , _lowercase=False , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=11 , _lowercase=11 , **_lowercase , ) -> int:
_lowerCamelCase : Any = vocab_size
# Backward compatibility with n_embed kwarg
_lowerCamelCase : Optional[int] = kwargs.pop('''n_embed''' , _lowercase )
_lowerCamelCase : str = hidden_size if n_embed is None else n_embed
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : List[str] = layer_norm_epsilon
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Tuple = use_cache
_lowerCamelCase : List[Any] = hidden_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Any = bos_token_id
_lowerCamelCase : Dict = eos_token_id
_lowerCamelCase : Optional[int] = num_attention_heads if num_kv_heads is None else num_kv_heads
_lowerCamelCase : Optional[int] = alibi
_lowerCamelCase : Dict = new_decoder_architecture
_lowerCamelCase : List[Any] = multi_query # Ignored when new_decoder_architecture is True
_lowerCamelCase : Any = parallel_attn
_lowerCamelCase : Optional[Any] = bias
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
@property
def a__ ( self ) -> Union[str, Any]:
return self.hidden_size // self.num_attention_heads
@property
def a__ ( self ) -> Optional[Any]:
return not self.alibi
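
# Sketch: a toy Falcon-style configuration (values illustrative, not a released model):
#
#   config = FalconConfig(vocab_size=1000, hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
#   config.head_dim  # 256 // 4 == 64
#   config.rotary    # True, because `alibi` defaults to False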
| 434 | """simple docstring"""
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
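
# Hand-checked round trip: "HELLO" is 48 45 4C 4C 4F in hexadecimal.
#
#   >>> base16_encode(b"HELLO")
#   '48454C4C4F'
#   >>> base16_decode("48454C4C4F")
#   b'HELLO'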
# ---------------------------------------------------------------------------
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file whose `all_model_classes` attribute is non-empty."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read, avoiding the full class representation when
    serializing the information into a JSON file."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
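
# Sketch of the intended entry point (run from the repository root so the
# `sys.path` hack above works; the test file path is illustrative):
#
#   test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   mapping = get_model_to_tester_mapping(test_file)
#   print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}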
# ---------------------------------------------------------------------------
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return the starting indices of every occurrence of `pattern` in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
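
# Hand-checked output for the demo above: "ABC" starts at indices 4, 10 and 18 of
# "ABAAABCDBBABCDDEBCABC", so the script prints [4, 10, 18].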
# ---------------------------------------------------------------------------
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( a_ : Any , a_ : int , a_ : List[Any] , a_ : List[str] ) -> Dict:
# Initialise PyTorch model
__SCREAMING_SNAKE_CASE :Any = FunnelConfig.from_json_file(a_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = FunnelBaseModel(a_ ) if base_model else FunnelModel(a_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(a_ , a_ , a_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
lowerCamelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
) | 498 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) stochastic differential equation scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 498 | 1 |
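# Minimal usage sketch for the VP-SDE scheduler above (the score is a
# stand-in for a trained score network; shapes are illustrative):
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    x = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = -x  # hypothetical score estimate
        x, x_mean = scheduler.step_pred(score, x, t)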
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than 4 characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
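    # Words longer than 4 characters are reversed, shorter ones are kept:
    # "Hey wollef sroirraw" -> "Hey fellow warriors"
    assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"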
| 199 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 199 | 1 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
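    # Worked example: "daBcd" matches the abbreviation "ABC" by capitalizing
    # 'a' and 'c' and deleting the two lowercase 'd's.
    print(abbr("daBcd", "ABC"))  # True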
| 453 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(5_12 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __A ( self: Dict , __A: Union[str, Any] , __A: int , __A: Dict , __A: List[str] ) -> str:
_A = MobileViTVaModel(config=__A )
model.to(__A )
model.eval()
_A = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self: str , __A: int , __A: Optional[Any] , __A: int , __A: Tuple ) -> Any:
_A = self.num_labels
_A = MobileViTVaForImageClassification(__A )
model.to(__A )
model.eval()
_A = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self: List[Any] , __A: Optional[Any] , __A: Tuple , __A: int , __A: List[Any] ) -> Optional[Any]:
_A = self.num_labels
_A = MobileViTVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
_A = model(__A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_A = model(__A , labels=__A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self: Dict ) -> List[Any]:
_A = self.prepare_config_and_inputs()
_A ,_A ,_A ,_A = config_and_inputs
_A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
A_ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __A ( self: Any ) -> List[str]:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __A ( self: int ) -> Any:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __A ( self: Optional[Any] ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __A ( self: Any ) -> Any:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: Optional[int] ) -> List[str]:
pass
def __A ( self: List[Any] ) -> Optional[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def __A ( self: List[str] ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __A ( self: str ) -> int:
def check_hidden_states_output(__A: List[str] , __A: str , __A: Optional[int] ):
_A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(__A , __A ) )
_A = outputs.hidden_states
_A = 5
self.assertEqual(len(__A ) , __A )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_A = 2
for i in range(len(__A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(__A , __A , __A )
def __A ( self: str ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __A ( self: int ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __A ( self: Dict ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = MobileViTVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
            if is_vision_available()
            else None
        )
@slow
def __A ( self: Optional[Any] ) -> Optional[int]:
_A = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
__A )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
# verify the logits
_A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __A )
_A = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
@slow
def __A ( self: List[str] ) -> Tuple:
_A = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_A = model.to(__A )
_A = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __A )
_A = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=__A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4 ) )
@slow
def __A ( self: List[Any] ) -> Optional[int]:
_A = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_A = model.to(__A )
_A = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(50, 60)] )
_A = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __A )
_A = image_processor.post_process_semantic_segmentation(outputs=__A )
_A = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __A )
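# Example invocation (a sketch; the path follows the transformers repo layout):
# RUN_SLOW=1 pytest tests/models/mobilevitv2/test_modeling_mobilevitv2.py -k "integration"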
| 484 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads.")
# convert layers
logger.info("Converting weights..." )
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
UpperCAmelCase_: str = int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
UpperCAmelCase_: Dict = getattr(_a ,"embeddings" )
UpperCAmelCase_: str = getattr(_a ,"LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
UpperCAmelCase_: str = getattr(_a ,"encoder" )
UpperCAmelCase_: Optional[Any] = getattr(_a ,"layer" )
UpperCAmelCase_: str = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
UpperCAmelCase_: Optional[Any] = getattr(_a ,"pooler" )
UpperCAmelCase_: Optional[int] = getattr(_a ,"dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
UpperCAmelCase_: int = getattr(_a ,"embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
UpperCAmelCase_: str = getattr(_a ,"word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
UpperCAmelCase_: Union[str, Any] = getattr(_a ,"position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
UpperCAmelCase_: Tuple = getattr(_a ,"token_type_embeddings" )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append("weight" )
UpperCAmelCase_: int = getattr(_a ,"weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
UpperCAmelCase_: Optional[Any] = getattr(_a ,"attention" )
UpperCAmelCase_: str = getattr(_a ,"self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
UpperCAmelCase_: Union[str, Any] = getattr(_a ,"attention" )
UpperCAmelCase_: List[Any] = getattr(_a ,"output" )
UpperCAmelCase_: Optional[int] = getattr(_a ,"LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
UpperCAmelCase_: Optional[Any] = getattr(_a ,"attention" )
UpperCAmelCase_: str = getattr(_a ,"output" )
UpperCAmelCase_: Dict = getattr(_a ,"dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
UpperCAmelCase_: str = getattr(_a ,"output" )
UpperCAmelCase_: Union[str, Any] = getattr(_a ,"dense" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["output", "LayerNorm"] )
UpperCAmelCase_: List[Any] = getattr(_a ,"output" )
UpperCAmelCase_: List[str] = getattr(_a ,"LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
UpperCAmelCase_: Tuple = getattr(_a ,"key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
UpperCAmelCase_: Union[str, Any] = getattr(_a ,"query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
UpperCAmelCase_: List[Any] = getattr(_a ,"value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
UpperCAmelCase_: Tuple = getattr(_a ,"intermediate" )
UpperCAmelCase_: Union[str, Any] = getattr(_a ,"dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("output" )
UpperCAmelCase_: Optional[Any] = getattr(_a ,"output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
UpperCAmelCase_: Any = getattr(_a ,"bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
UpperCAmelCase_: Optional[int] = getattr(_a ,"weight" )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
UpperCAmelCase_: Tuple = ".".join(_a )
if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" ,_a ) or re.match(
r"(\S+)\.attention\.output\.dense\.weight" ,_a ):
UpperCAmelCase_: List[Any] = array.reshape(pointer.data.shape )
if "kernel" in full_name:
UpperCAmelCase_: Union[str, Any] = array.transpose()
if pointer.shape == array.shape:
UpperCAmelCase_: int = torch.from_numpy(_a )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
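# Example invocation (a sketch; the script name and paths are placeholders):
# python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin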
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 709 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(config)
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Any = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Dict = DDPMScheduler()
UpperCAmelCase_: Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Union[str, Any] = self.dummy_vae
UpperCAmelCase_: Optional[int] = self.dummy_text_encoder
UpperCAmelCase_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_: Dict = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: Optional[int] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Union[str, Any] = "A painting of a squirrel eating a burger"
UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: int = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Optional[Any] = output.images
UpperCAmelCase_: List[str] = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: List[Any] = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=A__ , )[0]
UpperCAmelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_: List[str] = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_: int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_: List[str] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Union[str, Any] = DDPMScheduler()
UpperCAmelCase_: Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Dict = self.dummy_vae
UpperCAmelCase_: Any = self.dummy_text_encoder
UpperCAmelCase_: Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_: List[str] = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: str = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_: Union[str, Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Any = output.images
assert image.shape[0] == 2
UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: Any = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Dict = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Dict = DDPMScheduler()
UpperCAmelCase_: int = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Dict = self.dummy_vae
UpperCAmelCase_: Dict = self.dummy_text_encoder
UpperCAmelCase_: Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_: Union[str, Any] = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_: List[str] = unet.half()
UpperCAmelCase_: Union[str, Any] = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_: List[Any] = torch.manual_seed(0 )
UpperCAmelCase_: str = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_: str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_: Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: List[str] = "a cat sitting on a park bench"
UpperCAmelCase_: Any = torch.manual_seed(0 )
UpperCAmelCase_: Any = pipe(
prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
UpperCAmelCase_: Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_: Optional[int] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Any = StableDiffusionUpscalePipeline.from_pretrained(
A__ , torch_dtype=torch.floataa , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Any = "a cat sitting on a park bench"
UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = pipe(
prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
UpperCAmelCase_: str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case_ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_: List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: Tuple = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
A__ , torch_dtype=torch.floataa , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_: str = "a cat sitting on a park bench"
UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_: Union[str, Any] = pipe(
prompt=A__ , image=A__ , generator=A__ , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_: Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 306 | 0 |
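# Example invocation for the pipeline tests above (a sketch; the path is a
# placeholder following the diffusers repo layout):
# RUN_SLOW=1 pytest tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py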
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 2_55.0
        config.upsampler = ""

    return config
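# Quick illustration of the dispatch above (the URL fragment is one of the
# checkpoint names the branches match on):
# get_config(".../Swin2SR_Lightweight_X2_64.pth").upsampler == "pixelshuffledirect"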
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)

    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F"Unexpected key {key} in state_dict")
# verify values
__SCREAMING_SNAKE_CASE : List[Any] = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
__SCREAMING_SNAKE_CASE : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
__SCREAMING_SNAKE_CASE : List[str] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__SCREAMING_SNAKE_CASE : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56
__SCREAMING_SNAKE_CASE : int = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__SCREAMING_SNAKE_CASE : Dict = transforms(_lowerCamelCase ).unsqueeze(0 )
if config.num_channels == 1:
__SCREAMING_SNAKE_CASE : List[str] = pixel_values[:, 0, :, :].unsqueeze(1 )
__SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE : int = torch.Size([1, 3, 5_12, 5_12] )
__SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size([1, 3, 5_12, 5_12] )
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__SCREAMING_SNAKE_CASE : str = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _lowerCamelCase , atol=1E-3 )
print("""Looks ok!""" )
__SCREAMING_SNAKE_CASE : Tuple = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
__SCREAMING_SNAKE_CASE : Optional[int] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
model.push_to_hub(F"caidas/{model_name}" )
processor.push_to_hub(F"caidas/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 578 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(model_type: str, generator_name_or_path: str, question_encoder_name_or_path: str, dest_dir: Path, config_name_or_path: str = None, generator_tokenizer_name_or_path: str = None, question_encoder_tokenizer_name_or_path: str = None, ):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
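# Example invocation (a sketch; script name and model identifiers are placeholders):
# python consolidate_rag_checkpoint.py --model_type rag_sequence \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-checkpoint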
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 578 | 1 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via the radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and do the inverse transform to get A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
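    # Usage sketch: (1 + 2x + 3x^2) * (3 + 4x) = 3 + 10x + 17x^2 + 12x^3,
    # so the product coefficients should come back as [3, 10, 17, 12]
    # (as complex numbers, up to rounding).
    print(FFT([1, 2, 3], [3, 4]).product)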
| 661 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """Text decoder for UniDiffuser: maps prefix embeddings to text with a GPT-2 language-model head."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 5_02_57, n_positions: int = 10_24, n_embd: int = 7_68, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.0_2, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False, ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal.")

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
A : Dict =torch.split(SCREAMING_SNAKE_CASE__ , 1 , dim=0 )
A : int =[]
A : Optional[int] =[]
for feature in features:
A : int =self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE__ ) ) # back to the clip feature
# Only support beam search for now
A , A : Dict =self.generate_beam(
input_embeds=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A : str =torch.stack(SCREAMING_SNAKE_CASE__ )
A : int =torch.stack(SCREAMING_SNAKE_CASE__ )
return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Generates text from a token embedding via beam search."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0

                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)

                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
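    # Illustration (not from the pipeline above): the final re-ranking in
    # `generate_beam` divides each beam's summed log-probability by its length,
    # so shorter sequences are not unfairly favored. A self-contained sketch
    # with made-up numbers:
    #
    #   scores = torch.tensor([-3.2, -4.1, -2.9])    # summed log-probs per beam
    #   seq_lengths = torch.tensor([4.0, 7.0, 3.0])  # tokens generated per beam
    #   (scores / seq_lengths).argsort(descending=True)  # -> tensor([1, 0, 2])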
| 661 | 1 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        ) | 106 |
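# Usage sketch (illustrative; any keyword arguments each Formatter accepts are
# assumptions, not shown above):
#
#   formatter = get_formatter("np")        # "np" alias resolves to NumpyFormatter
#   isinstance(formatter, NumpyFormatter)  # True
#   get_formatter("torch")                 # raises _torch_error if PyTorch is missing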
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.')

        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.')

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
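# Usage sketch (illustrative): composing the two sub-configs above and
# round-tripping through `to_dict`. All values shown are the defaults defined
# in this file.
#
#   config = AltCLIPConfig()
#   d = config.to_dict()
#   d["text_config"]["project_dim"]  # 768
#   restored = AltCLIPConfig(text_config=d["text_config"], vision_config=d["vision_config"])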
| 179 | 0 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
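# Example invocation (the script name and paths are placeholders, not real files):
#
#   python convert_t5_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5_tf/model.ckpt \
#       --config_file ./t5_tf/config.json \
#       --pytorch_dump_path ./t5_pytorch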
| 433 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
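# Aside (illustrative): the token-type mask built by
# `create_token_type_ids_from_sequences` is zeros over `[CLS] A [SEP]` and ones
# over `B [SEP]`. A pure-Python sketch with hypothetical ids:
#
#   cls, sep = [101], [102]
#   token_ids_0, token_ids_1 = [7, 8, 9], [11, 12]
#   len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
#   # -> [0, 0, 0, 0, 0, 1, 1, 1]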
| 433 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<eod>')
        self.assertEqual(len(vocab_keys), 1_0_0_6)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_0)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
# fmt: off
a_ : Optional[Any] = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
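# Aside (illustrative): SentencePiece marks word starts with SPIECE_UNDERLINE
# ("▁"), so detokenization is just concatenate-then-replace:
#
#   "".join(["▁This", "▁is", "▁a", "▁t", "est"]).replace(SPIECE_UNDERLINE, " ").strip()
#   # -> 'This is a test'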
| 570 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm for finding a pattern within a piece of
    text with complexity O(n + m).
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
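    # Extra illustration (not from the original tests): the same failure array
    # also lets us count *overlapping* matches by resetting j after each hit.
    def count_overlapping(pattern: str, text: str) -> int:
        failure = get_failure_array(pattern)
        count, j = 0, 0
        for char in text:
            while j > 0 and pattern[j] != char:
                j = failure[j - 1]
            if pattern[j] == char:
                j += 1
            if j == len(pattern):
                count += 1
                j = failure[j - 1]
        return count

    assert count_overlapping("aa", "aaaaa") == 4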
| 570 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[Any] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Dict ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : List[str] , **__UpperCamelCase : List[str] ) -> str:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Dict , *__UpperCamelCase : Any , **__UpperCamelCase : Dict ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : List[Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Tuple , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : int ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[str] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[Any] ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Any ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[Any] ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[str] , *__UpperCamelCase : int , **__UpperCamelCase : Optional[Any] ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : int , **__UpperCamelCase : int ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : str , *__UpperCamelCase : str , **__UpperCamelCase : Any ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Dict , *__UpperCamelCase : Dict , **__UpperCamelCase : str ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : List[str] ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ) -> Dict:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : int , *__UpperCamelCase : Dict , **__UpperCamelCase : str ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Optional[Any] ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Dict , *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Any ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : List[str] ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : int ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : Dict , **__UpperCamelCase : Any ) -> str:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : int , *__UpperCamelCase : Any , **__UpperCamelCase : str ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Tuple , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
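# Aside (illustrative, not part of the generated file above): each placeholder
# class follows the same dummy-object pattern, so importing succeeds without
# the flax backend and a helpful ImportError is raised only on first use.
# A minimal sketch of that pattern, using the helpers imported at the top:
#
#   class FlaxThing(metaclass=DummyObject):  # "FlaxThing" is a made-up name
#       _backends = ["flax"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["flax"])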
| 702 | """simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit from the voltage
    and current magnitudes and their phase angles (in degrees).

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
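    # Worked example (illustrative): 100 V at 45 degrees with 5 A at -30 degrees
    # gives an apparent power of 500 VA at 15 degrees, roughly 482.96+129.41j.
    print(apparent_power(100, 5, 45, -30))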
| 342 | 0 |
"""simple docstring"""
a_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
a_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
a_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 480 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = """adapt react readapt apt"""
        output_text = """adapt react readapt apt"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = """adapt react readapt apt"""
        bpe_tokens = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
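# Aside (illustrative): with the toy vocab above, converting ids back to tokens
# is a plain reverse lookup:
#
#   vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
#   [vocab[i] for i in [0, 1, 2, 4, 5, 1, 0, 3, 6]]
#   # -> ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt', '<unk>']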
| 480 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: coords of shape (width * height, 2)
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
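if __name__ == "__main__":
    # Quick shape check (illustrative): 20 cameras panning around the origin,
    # each yielding a 64x64 grid of (origin, direction) ray pairs.
    cameras = create_pan_cameras(64)
    print(cameras.camera_rays.shape)  # torch.Size([1, 81920, 2, 3])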
| 715 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager to hide the terminal cursor and restore it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
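if __name__ == "__main__":
    # Minimal usage sketch: the cursor stays hidden for the duration of the
    # block and is restored even if the body raises.
    import time

    with hide():
        time.sleep(1)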
| 655 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class a__ :
def __init__( self :List[str] , _lowerCamelCase :str = "cpu" , _lowerCamelCase :str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
UpperCamelCase_ : Any =device
UpperCamelCase_ : Any =CLIPTokenizerFast.from_pretrained(_lowerCamelCase )
UpperCamelCase_ : Optional[Any] =[0.48145466, 0.4578275, 0.40821073]
UpperCamelCase_ : Optional[Any] =[0.26862954, 0.26130258, 0.27577711]
UpperCamelCase_ : int =torchvision.transforms.Normalize(self.image_mean , self.image_std )
UpperCamelCase_ : Tuple =torchvision.transforms.Resize(224 )
UpperCamelCase_ : Dict =torchvision.transforms.CenterCrop(224 )
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ : Dict =self.resize(_lowerCamelCase )
UpperCamelCase_ : Dict =self.center_crop(_lowerCamelCase )
UpperCamelCase_ : Tuple =self.normalize(_lowerCamelCase )
return images
def __call__( self :Any , _lowerCamelCase :int=None , _lowerCamelCase :str=None , **_lowerCamelCase :List[str] ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =self.tokenizer(text=_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_ : int =self.preprocess_img(_lowerCamelCase )
UpperCamelCase_ : str ={key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class a__ ( nn.Module ):
def __init__( self :str , _lowerCamelCase :str=10 , _lowerCamelCase :Dict=0.01 , _lowerCamelCase :Optional[int]=None , _lowerCamelCase :int=None , _lowerCamelCase :Any=None , _lowerCamelCase :Union[str, Any]=None , _lowerCamelCase :Dict=None , _lowerCamelCase :List[str]=None , _lowerCamelCase :Dict=False , _lowerCamelCase :Union[str, Any]=True , _lowerCamelCase :str="image" , _lowerCamelCase :Optional[int]=True , _lowerCamelCase :int=False , _lowerCamelCase :Optional[Any]=False , _lowerCamelCase :str=False , ):
'''simple docstring'''
super().__init__()
UpperCamelCase_ : Union[str, Any] =None
UpperCamelCase_ : Any =device if device else get_device()
if vqgan:
UpperCamelCase_ : str =vqgan
else:
UpperCamelCase_ : Optional[int] =load_vqgan(self.device , conf_path=_lowerCamelCase , ckpt_path=_lowerCamelCase )
self.vqgan.eval()
if clip:
UpperCamelCase_ : Optional[int] =clip
else:
UpperCamelCase_ : str =CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
UpperCamelCase_ : Optional[Any] =ProcessorGradientFlow(device=self.device )
UpperCamelCase_ : str =iterations
UpperCamelCase_ : Union[str, Any] =lr
UpperCamelCase_ : Tuple =log
UpperCamelCase_ : List[str] =make_grid
UpperCamelCase_ : List[str] =return_val
UpperCamelCase_ : List[Any] =quantize
UpperCamelCase_ : Dict =self.vqgan.decoder.z_shape
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :Dict=None , _lowerCamelCase :str=None , _lowerCamelCase :str=5 , _lowerCamelCase :Optional[Any]=True ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =[]
if output_path is None:
UpperCamelCase_ : Any ='./animation.gif'
if input_path is None:
UpperCamelCase_ : Dict =self.save_path
UpperCamelCase_ : Dict =sorted(glob(input_path + '/*' ) )
if not len(_lowerCamelCase ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(_lowerCamelCase ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
UpperCamelCase_ : List[str] =total_duration / len(_lowerCamelCase )
UpperCamelCase_ : str =[frame_duration] * len(_lowerCamelCase )
if extend_frames:
UpperCamelCase_ : int =1.5
UpperCamelCase_ : Dict =3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(_lowerCamelCase ) )
imageio.mimsave(_lowerCamelCase , _lowerCamelCase , duration=_lowerCamelCase )
print(f'''gif saved to {output_path}''' )
def lowerCamelCase_ ( self :int , _lowerCamelCase :str=None , _lowerCamelCase :int=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
UpperCamelCase_ : Any =preprocess(Image.open(_lowerCamelCase ) , target_image_size=256 ).to(self.device )
UpperCamelCase_ : Union[str, Any] =preprocess_vqgan(_lowerCamelCase )
UpperCamelCase_ , *UpperCamelCase_ : List[str] =self.vqgan.encode(_lowerCamelCase )
return z
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :List[Any] ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =self.latent.detach().requires_grad_()
UpperCamelCase_ : List[str] =base_latent + transform_vector
if self.quantize:
UpperCamelCase_ , *UpperCamelCase_ : Dict =self.vqgan.quantize(_lowerCamelCase )
else:
UpperCamelCase_ : Any =trans_latent
return self.vqgan.decode(_lowerCamelCase )
def lowerCamelCase_ ( self :int , _lowerCamelCase :Any , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Any=None ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self.clip_preprocessor(text=_lowerCamelCase , images=_lowerCamelCase , return_tensors='pt' , padding=_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =self.clip(**_lowerCamelCase )
UpperCamelCase_ : Optional[int] =clip_outputs.logits_per_image
if weights is not None:
UpperCamelCase_ : Dict =similarity_logits * weights
return similarity_logits.sum()
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :Dict , _lowerCamelCase :List[Any] , _lowerCamelCase :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : int =self._get_clip_similarity(pos_prompts['prompts'] , _lowerCamelCase , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
UpperCamelCase_ : Any =self._get_clip_similarity(neg_prompts['prompts'] , _lowerCamelCase , weights=neg_prompts['weights'] )
else:
UpperCamelCase_ : Optional[Any] =torch.tensor([1] , device=self.device )
UpperCamelCase_ : Optional[Any] =-torch.log(_lowerCamelCase ) + torch.log(_lowerCamelCase )
return loss
def lowerCamelCase_ ( self :str , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Dict , _lowerCamelCase :Any ):
'''simple docstring'''
UpperCamelCase_ : str =torch.randn_like(self.latent , requires_grad=_lowerCamelCase , device=self.device )
UpperCamelCase_ : str =torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCamelCase_ : str =self._add_vector(_lowerCamelCase )
UpperCamelCase_ : int =loop_post_process(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =self._get_CLIP_loss(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
print('CLIP loss' , _lowerCamelCase )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=_lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :Dict , _lowerCamelCase :Tuple , _lowerCamelCase :Tuple ):
'''simple docstring'''
wandb.init(reinit=_lowerCamelCase , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
UpperCamelCase_ : Tuple =Image.open(_lowerCamelCase )
UpperCamelCase_ : List[str] =image.resize((256, 256) )
wandb.log('Original Image' , wandb.Image(_lowerCamelCase ) )
def lowerCamelCase_ ( self :int , _lowerCamelCase :Union[str, Any] ):
'''simple docstring'''
if not prompts:
return []
UpperCamelCase_ : str =[]
UpperCamelCase_ : int =[]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : Union[str, Any] =[prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(_lowerCamelCase , (tuple, list) ):
UpperCamelCase_ : int =prompt[0]
UpperCamelCase_ : Any =float(prompt[1] )
elif ":" in prompt:
UpperCamelCase_ , UpperCamelCase_ : str =prompt.split(':' )
UpperCamelCase_ : List[Any] =float(_lowerCamelCase )
else:
UpperCamelCase_ : List[str] =prompt
UpperCamelCase_ : int =1.0
processed_prompts.append(_lowerCamelCase )
weights.append(_lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_lowerCamelCase , device=self.device ),
}
    def generate( self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ):
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '_' + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('Original Image' )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
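    # Hedged usage sketch (the editor instance and prompt are illustrative; the class
    # itself is defined earlier in this file):
    #   editor = <this class>(...)
    #   editor.generate("a smiling face", image_path="face.png", show_intermediate=False)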
| 357 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps , embedding_dim , freq_shift = 1 , min_timescale = 1 , max_timescale = 1.0e4 , flip_sin_to_cos = False , scale = 1.0 , ):
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
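# Worked shape example (a sketch, not from the original file): for
# timesteps = jnp.array([0, 10]) and embedding_dim = 8, the call returns an
# array of shape (2, 8) -- sin on the first 4 channels and cos on the last 4
# (or the reverse when flip_sin_to_cos=True).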
class FlaxTimestepEmbedding( nn.Module ):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__( self :Optional[Any] , _lowerCamelCase :List[str] ):
'''simple docstring'''
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(_lowerCamelCase )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(temb )
        return temb
class FlaxTimesteps( nn.Module ):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
@nn.compact
def __call__( self :Union[str, Any] , _lowerCamelCase :Dict ):
'''simple docstring'''
return get_sinusoidal_embeddings(
_lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
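# Hedged usage sketch (schematic shapes only; real Flax usage goes through
# Module.init/apply rather than a bare call):
#   ts = FlaxTimesteps(dim=32)(jnp.array([1, 2]))        # -> (2, 32)
#   emb = FlaxTimestepEmbedding(time_embed_dim=128)(ts)   # -> (2, 128)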
| 357 | 1 |
import argparse
import struct
import unittest
class SHAaaa :
    def __init__( self , data : bytes ):
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
    @staticmethod
    def preprocessing( data : bytes ):
        padding = b'\x80' + (b'\x00' * (6_3 - (len(data ) + 8) % 6_4))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 6_4]
            for x in range(0 , len(self.preprocessed_data ) , 6_4 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
            # add 48 0-ed integers
            words += [0] * 4_8
            a , b , c , d , e , f , g , h = self.hashes
            for index in range(0 , 6_4 ):
                if index > 1_5:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 1_5] , 7 )
                        ^ self.ror(words[index - 1_5] , 1_8 )
                        ^ (words[index - 1_5] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 1_7 )
                        ^ self.ror(words[index - 2] , 1_9 )
                        ^ (words[index - 2] >> 1_0)
                    )
                    words[index] = (
                        words[index - 1_6] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                S1 = self.ror(e , 6 ) ^ self.ror(e , 1_1 ) ^ self.ror(e , 2_5 )
                ch = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                S0 = self.ror(a , 2 ) ^ self.ror(a , 1_3 ) ^ self.ror(a , 2_2 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x1_00_00_00_00
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value : int , rotations : int ):
        return 0xff_ff_ff_ff & (value << (3_2 - rotations)) | (value >> rotations)
class SHAaaaTest ( unittest.TestCase ):
    def test_match_hashes( self ):
        import hashlib

        msg = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , 'utf-8' )
    print(SHAaaa(hash_input ).hash )


if __name__ == "__main__":
    main()
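# Worked example (well-known SHA-256 test vector):
#   SHAaaa(b"abc").hash should equal hashlib.sha256(b"abc").hexdigest(), i.e.
#   "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"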
| 303 |
def solution( n = 1_0_0_0 ):
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 1_5 == 0:
            # unreachable: every multiple of 15 is already a multiple of 3
            result -= a
        a += 1
    return result
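# Worked example: solution(10) sums the multiples of 3 or 5 below 10,
# i.e. 3 + 5 + 6 + 9 = 23.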
if __name__ == "__main__":
print(F"""{solution() = }""")
| 303 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests( unittest.TestCase ):
    def test_swish( self ):
        act = get_activation('''swish''' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_silu( self ):
        act = get_activation('''silu''' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_mish( self ):
        act = get_activation('''mish''' )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_gelu( self ):
        act = get_activation('''gelu''' )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
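# Why these asserts hold (worked values): silu(x) = x * sigmoid(x), so
# silu(-100) underflows to 0.0 in float32, silu(0) = 0 exactly, and
# silu(20) = 20 * sigmoid(20) ~= 20 * 0.999999998, which rounds to 20.0 in float32.
# mish(x) = x * tanh(softplus(x)) and gelu behave the same way at these extremes.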
| 64 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """simple docstring"""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'.{module_name}' , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config(pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ) -> dict:
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class AutoFeatureExtractor :
'''simple docstring'''
def __init__( self : int):
'''simple docstring'''
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''')
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True
        config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('''feature_extractor_type''' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
            feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , '''feature_extractor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
            f'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
            f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}')
    @staticmethod
    def register( config_class , feature_extractor_class ):
        '''simple docstring'''
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
| 512 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
"""simple docstring"""
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['image', 'text']
    outputs = ['text']
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="pt" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="pt" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
        sequence = re.sub(R"<.*?>" , "" , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
return sequence["answer"]
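    # Hedged usage sketch (inputs illustrative; the tool is normally invoked by an agent):
    #   tool = DocumentQuestionAnsweringTool()
    #   answer = tool(document=pil_image, question="What is the invoice total?")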
| 594 | from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler( ArgumentHandler ):
"""simple docstring"""
    def _parse_labels( self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split("," ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("You must include at least one label and at least one sequence." )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline( ChunkPipeline ):
"""simple docstring"""
    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
    @property
    def entailment_id( self ):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail" ):
                return ind
        return -1
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs ):
        if kwargs.get("multi_class" , None ) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs , ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(F'''Unable to understand extra arguments {args}''' )
        return super().__call__(sequences , **kwargs )
    def preprocess( self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        sequence_pairs , sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward( self , inputs ):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess( self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
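    # Hedged usage sketch (model name illustrative):
    #   from transformers import pipeline
    #   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    #   classifier("I loved this film", candidate_labels=["positive", "negative"])
    #   # -> {"sequence": ..., "labels": [...], "scores": [...]}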
| 594 | 1 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
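# A second worked example (a sketch): an odd cycle is never bipartite.
# print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False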
| 665 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
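# Note (hedged): the prev_value/value recurrence appears to walk solutions of the
# Pell-like equation behind "almost equilateral" triangles (sides a, a, a +/- 1 with
# integral area), each iteration yielding the next valid perimeter 2 * value +/- 2.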
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , patch_size=2 , num_channels=3 , embed_dim=1_6 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=1_0 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward( self ):
'''simple docstring'''
pass
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("""Swin does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
    def test_feed_forward_chunking( self ):
'''simple docstring'''
pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def A ( self : List[Any] ) -> int:
'''simple docstring'''
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        '''simple docstring'''
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def test_model_from_pretrained( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A ( self : str ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A ( self : str ) -> Union[str, Any]:
'''simple docstring'''
pass
    def test_model_outputs_equivalence( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0  # NaN != NaN, so this zeroes out only the NaN entries in place
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif isinstance(tuple_object , Dict ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"
                            f" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest( unittest.TestCase , BackboneTesterMixin ):
    '''simple docstring'''

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self )
    def test_backbone_outputs( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 714 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self , num_of_nodes : int ) -> None:
        '''simple docstring'''
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge( self , u_node : int , v_node : int , weight : int ) -> None:
        '''simple docstring'''
        self.m_edges.append([u_node, v_node, weight] )

    def find_component( self , u_node : int ) -> int:
        '''simple docstring'''
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )

    def set_component( self , u_node : int ) -> None:
        '''simple docstring'''
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )

    def union( self , component_size : list[int] , u_node : int , v_node : int ) -> None:
        '''simple docstring'''
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )

    def boruvka( self ) -> None:
        '''simple docstring'''
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def __magic_name__( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
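# Hedged usage sketch (edges illustrative; `boruvka` is the method name reconstructed above):
#   g = _SCREAMING_SNAKE_CASE(3)
#   g.add_edge(0, 1, 5)
#   g.add_edge(1, 2, 10)
#   g.add_edge(0, 2, 1)
#   g.boruvka()  # picks the weight-1 and weight-5 edges; total MST weight is 6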
| 265 | 0 |
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
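# Worked expectation (not a captured log): since the circuit applies no gates before
# measuring qubit 0, every shot reads '0', so the counts look like {'0': 1000}.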
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 661 | '''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args ) -> None:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ) -> None:
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ) -> None:
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
UpperCAmelCase_ = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not specified whether to"""
                    " overwrite it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.student_type]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase_ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase_ = tokenizer.all_special_tokens.index(snake_case_ )
UpperCAmelCase_ = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase_ = special_tok_ids
UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase_ = 0.0 # do not predict special tokens
UpperCAmelCase_ = torch.from_numpy(snake_case_ )
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase_ = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
UpperCAmelCase_ = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase_ = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
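# --- Hedged sketch: the MLM masking-weight smoothing computed above, shown standalone. ---
# `token_probs` is built as counts ** -mlm_smoothing with special tokens zeroed, so rarer
# tokens are selected for masking more often (word2vec-style subsampling). The function
# name, the toy vocabulary, and the counts below are made-up illustrations, not values
# from this script or any real corpus.
import numpy as np
import torch


def smoothed_mask_weights(counts, special_ids, smoothing=0.7):
    # clamp counts to 1 so unseen tokens do not cause a division by zero
    weights = np.maximum(np.asarray(counts, dtype=np.float64), 1) ** -smoothing
    for idx in special_ids:
        weights[idx] = 0.0  # never select special tokens for masking
    return torch.from_numpy(weights)


# hypothetical 6-token vocabulary with ids 0 and 1 as special tokens
print(smoothed_mask_weights([0, 10, 100, 1000, 5, 42], special_ids=[0, 1]))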
| 78 | 0 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Any ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
A__ : int =deepcopy(UpperCamelCase__ )
elif os.path.exists(UpperCamelCase__ ):
with io.open(UpperCamelCase__ , "r" , encoding="utf-8" ) as f:
A__ : Dict =json.load(UpperCamelCase__ )
else:
try:
A__ : Dict =baseaa.urlsafe_baadecode(UpperCamelCase__ ).decode("utf-8" )
A__ : List[Any] =json.loads(UpperCamelCase__ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
A__ : Optional[int] =config
self.set_stage_and_offload()
def _UpperCAmelCase ( self : int ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
A__ : Optional[Any] =self.get_value("zero_optimization.stage" , -1 )
# offload
A__ : Any =False
if self.is_zeroa() or self.is_zeroa():
A__ : List[str] =set(["cpu", "nvme"] )
A__ : int =set(
[
self.get_value("zero_optimization.offload_optimizer.device" ),
self.get_value("zero_optimization.offload_param.device" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
A__ : List[str] =True
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Any ):
A__ : List[str] =self.config
# find the config node of interest if it exists
A__ : Tuple =ds_key_long.split("." )
A__ : Tuple =nodes.pop()
for node in nodes:
A__ : int =config.get(UpperCamelCase__ )
if config is None:
return None, ds_key
return config, ds_key
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=None ):
A__ : str =self.find_config_node(UpperCamelCase__ )
if config is None:
return default
return config.get(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str]=False ):
A__ : str =self.config
# find the config node of interest if it exists
A__ : Dict =ds_key_long.split("." )
for node in nodes:
A__ : int =config
A__ : List[Any] =config.get(UpperCamelCase__ )
if config is None:
if must_exist:
raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCamelCase__ )
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Tuple ):
A__ : Tuple =self.get_value(UpperCamelCase__ )
return False if value is None else bool(UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ):
A__ : List[str] =self.get_value(UpperCamelCase__ )
return False if value is None else not bool(UpperCamelCase__ )
def _UpperCAmelCase ( self : List[Any] ):
return self._stage == 2
def _UpperCAmelCase ( self : int ):
return self._stage == 3
def _UpperCAmelCase ( self : List[Any] ):
return self._offload
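# --- Hedged sketch: the dotted-path lookup that `find_config_node`/`get_value` perform
# above, shown standalone on a plain nested dict. Names here are illustrative only. ---
def get_by_dotted_key(config, dotted_key, default=None):
    node = config
    for part in dotted_key.split("."):
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node


_sample = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_by_dotted_key(_sample, "zero_optimization.stage") == 3
assert get_by_dotted_key(_sample, "zero_optimization.offload_param.device") == "cpu"
assert get_by_dotted_key(_sample, "optimizer.type", default="none") == "none"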
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : List[str] ):
A__ : Optional[Any] =engine
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[Any] ):
# runs backpropagation and handles mixed precision
self.engine.backward(UpperCamelCase__ , **UpperCamelCase__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases, thus enabling a simple
# training loop that works transparently under many training regimes.
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] ):
super().__init__(UpperCamelCase__ , device_placement=UpperCamelCase__ , scaler=UpperCamelCase__ )
A__ : Dict =hasattr(self.optimizer , "overflow" )
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[int]=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _UpperCAmelCase ( self : str ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _UpperCAmelCase ( self : List[Any] ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=0.001 , UpperCamelCase__ : List[str]=0 , **UpperCamelCase__ : List[str] ):
A__ : List[Any] =params
A__ : int =lr
A__ : Optional[int] =weight_decay
A__ : str =kwargs
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : int ):
A__ : Union[str, Any] =optimizer
A__ : Dict =total_num_steps
A__ : List[str] =warmup_num_steps
A__ : List[str] =kwargs
| 704 | """simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__A : Optional[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : tuple , UpperCamelCase : Path , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=UpperCamelCase , exist_ok=UpperCamelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase , UpperCamelCase , f=output_path.as_posix() , input_names=UpperCamelCase , output_names=UpperCamelCase , dynamic_axes=UpperCamelCase , do_constant_folding=UpperCamelCase , use_external_data_format=UpperCamelCase , enable_onnx_checker=UpperCamelCase , opset_version=UpperCamelCase , )
else:
export(
UpperCamelCase , UpperCamelCase , f=output_path.as_posix() , input_names=UpperCamelCase , output_names=UpperCamelCase , dynamic_axes=UpperCamelCase , do_constant_folding=UpperCamelCase , opset_version=UpperCamelCase , )
@torch.no_grad()
def lowercase ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : bool = False ):
"""simple docstring"""
    A__ : Optional[int] =torch.float16 if fpaa else torch.float32  # half precision only when fp16 export is requested
if fpaa and torch.cuda.is_available():
A__ : List[Any] ="cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
A__ : Tuple ="cpu"
A__ : Dict =Path(UpperCamelCase )
# VAE DECODER
A__ : List[str] =AutoencoderKL.from_pretrained(model_path + "/vae" )
A__ : Dict =vae_decoder.config.latent_channels
# forward only through the decoder part
A__ : List[Any] =vae_decoder.decode
onnx_export(
UpperCamelCase , model_args=(
torch.randn(1 , UpperCamelCase , 25 , 25 ).to(device=UpperCamelCase , dtype=UpperCamelCase ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=UpperCamelCase , )
del vae_decoder
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
__A : Dict = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 595 | 0 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_lowerCAmelCase : Tuple = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowerCAmelCase : int = """\
Mean Squared Error (MSE) is the average of the squared difference between the predicted
and actual values.
"""
_lowerCAmelCase : Tuple = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def __a ( self : List[Any] ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str="uniform_average" , snake_case__ : int=True ):
'''simple docstring'''
UpperCAmelCase__ : str = mean_squared_error(
snake_case__ , snake_case__ , sample_weight=snake_case__ , multioutput=snake_case__ , squared=snake_case__ )
return {"mse": mse}
| 438 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase__ : str = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1e-3 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCAmelCase__ : Tuple = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : Union[str, Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(snake_case__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1e-3 ) )
| 438 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A ( lowerCamelCase_ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
_SCREAMING_SNAKE_CASE : int = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def lowercase__ ( self : Tuple , __UpperCAmelCase : Optional[Any]=0 ) -> int:
"""simple docstring"""
UpperCamelCase_ = floats_tensor((1, 3, 128, 128) , rng=random.Random(__UpperCAmelCase ) )
UpperCamelCase_ = torch.manual_seed(__UpperCAmelCase )
UpperCamelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
UpperCamelCase_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
UpperCamelCase_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
UpperCamelCase_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
@property
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ort.SessionOptions()
UpperCamelCase_ = False
return options
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
UpperCamelCase_ = init_image.resize((128, 128) )
# using the PNDM scheduler by default
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = 'A fantasy landscape, trending on artstation'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCAmelCase , output_type='np' , )
UpperCamelCase_ = output.images
UpperCamelCase_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
UpperCamelCase_ = init_image.resize((128, 128) )
UpperCamelCase_ = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = 'A fantasy landscape, trending on artstation'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCAmelCase , output_type='np' , )
UpperCamelCase_ = output.images
UpperCamelCase_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
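# --- Hedged sketch of the scheduler-swap pattern every test above repeats: rebuild a new
# scheduler from the pipeline's existing scheduler config, so timestep/beta settings carry
# over and only the sampling algorithm changes. The checkpoint id is the one the tests use;
# the assignment back onto `pipe.scheduler` is the standard diffusers idiom. ---
from diffusers import DPMSolverMultistepScheduler, OnnxStableDiffusionUpscalePipeline

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)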
| 559 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a : Any = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[str] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : int = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[Any] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
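# --- Hedged sketch: the same lazy-import idea as `_LazyModule`, reduced to a module-level
# `__getattr__` (PEP 562). Purely illustrative; this is not the transformers implementation. ---
import importlib

_LAZY_ATTRS = {"sqrt": "math", "pi": "math"}  # attribute name -> module that provides it


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])  # imported on first access only
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")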
| 559 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( a_):
"""simple docstring"""
a__ : int = ["image_processor", "tokenizer"]
a__ : Optional[int] = "ChineseCLIPImageProcessor"
a__ : Any = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : List[Any] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : List[Any] ) -> int:
UpperCAmelCase_= None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _snake_case , )
UpperCAmelCase_= kwargs.pop("""feature_extractor""" )
UpperCAmelCase_= image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_snake_case , _snake_case )
UpperCAmelCase_= self.image_processor
def __call__( self : Tuple , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : List[Any]=None , **__UpperCAmelCase : Dict ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCAmelCase_= self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
UpperCAmelCase_= self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
UpperCAmelCase_= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Optional[int] ) -> List[Any]:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _SCREAMING_SNAKE_CASE ( self : int , *__UpperCAmelCase : Dict , **__UpperCAmelCase : int ) -> str:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase_= self.tokenizer.model_input_names
UpperCAmelCase_= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _snake_case , )
return self.image_processor_class
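# --- Hedged usage sketch for the processor above: one call tokenizes the text and
# preprocesses the images, returning token ids plus pixel values. The checkpoint name is
# an assumption of a public Chinese-CLIP checkpoint; the blank image is a stand-in. ---
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
inputs = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids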
| 593 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Optional[Any] = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
A : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowercase__ : List[Any] = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowercase__ : Dict = "cuda" if torch.cuda.is_available() else "cpu"
def lowerCamelCase__ ( _A , _A=100 , _A=" " ):
'''simple docstring'''
snake_case_ = text.split(_A )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_A ) , _A )]
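# --- Worked example of the n-word chunker above, standalone with n=3 so the grouping is
# visible (the script itself uses n=100). The helper name is illustrative. ---
def _chunk_words(text, n=3, sep=" "):
    words = text.split(sep)
    return [sep.join(words[i : i + n]).strip() for i in range(0, len(words), n)]


print(_chunk_words("a b c d e f g"))  # ['a b c', 'd e f', 'g']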
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ , snake_case_ = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(_A ):
titles.append(title if title is not None else "" )
texts.append(_A )
return {"title": titles, "text": texts}
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_A , padding="longest" , return_tensors="pt" )["input_ids"]
snake_case_ = ctx_encoder(input_ids.to(device=_A ) , return_dict=_A ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase__ ( _A , _A , _A , ):
'''simple docstring'''
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
snake_case_ = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
snake_case_ = dataset.map(_A , batched=_A , num_proc=processing_args.num_proc )
# And compute the embeddings
snake_case_ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_A )
snake_case_ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
snake_case_ = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
snake_case_ = dataset.map(
partial(_A , ctx_encoder=_A , ctx_tokenizer=_A ) , batched=_A , batch_size=processing_args.batch_size , features=_A , )
# And finally save your dataset
snake_case_ = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(_A )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
snake_case_ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=_A )
# And save the index
snake_case_ = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(_A )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = field(
default=str(Path(UpperCAmelCase__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
lowerCAmelCase_ = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
lowerCAmelCase_ = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
lowerCAmelCase_ = field(
default=str(Path(UpperCAmelCase__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
lowerCAmelCase_ = field(
default=16 , metadata={
            '''help''': '''The batch size to use when computing the passage embeddings using the DPR context encoder.'''
} , )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowercase__ : Dict = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowercase__ : Optional[Any] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowercase__ : int = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
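# --- Hedged sketch: querying the saved dataset + index with a DPR question embedding.
# The two paths assume --output_dir pointed at "out/"; the question encoder is the
# multiset counterpart of the default context encoder above (also an assumption). ---
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

ds = load_from_disk("out/my_knowledge_dataset")
ds.load_faiss_index("embeddings", "out/my_knowledge_dataset_hnsw_index.faiss")
q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
question = "What does Moses' rod turn into ?"
q_emb = q_enc(**q_tok(question, return_tensors="pt"))[0][0].detach().numpy()  # pooler output
scores, passages = ds.get_nearest_examples("embeddings", q_emb, k=5)  # top-5 nearest passages
print(passages["title"])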
| 719 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowercase__ : Optional[int] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , __lowercase : Dict ):
"""simple docstring"""
super().__init__()
snake_case_ = torchvision.models.resnetaaa(pretrained=__lowercase )
snake_case_ = list(model.children() )[:-2]
snake_case_ = nn.Sequential(*__lowercase )
snake_case_ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def snake_case__ ( self : int , __lowercase : List[str] ):
"""simple docstring"""
snake_case_ = self.pool(self.model(__lowercase ) )
snake_case_ = torch.flatten(__lowercase , start_dim=2 )
snake_case_ = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Any , __lowercase : Any , __lowercase : str , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] ):
"""simple docstring"""
snake_case_ = [json.loads(__lowercase ) for l in open(__lowercase )]
snake_case_ = os.path.dirname(__lowercase )
snake_case_ = tokenizer
snake_case_ = labels
snake_case_ = len(__lowercase )
snake_case_ = max_seq_length
snake_case_ = transforms
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.data )
def __getitem__( self : str , __lowercase : Optional[int] ):
"""simple docstring"""
snake_case_ = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=__lowercase ) )
snake_case_ , snake_case_ , snake_case_ = sentence[0], sentence[1:-1], sentence[-1]
snake_case_ = sentence[: self.max_seq_length]
snake_case_ = torch.zeros(self.n_classes )
snake_case_ = 1
snake_case_ = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
snake_case_ = self.transforms(__lowercase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = [len(row["sentence"] ) for row in batch]
snake_case_ , snake_case_ = len(_A ), max(_A )
snake_case_ = torch.zeros(_A , _A , dtype=torch.long )
snake_case_ = torch.zeros(_A , _A , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_A , _A ) ):
snake_case_ = input_row["sentence"]
snake_case_ = 1
snake_case_ = torch.stack([row["image"] for row in batch] )
snake_case_ = torch.stack([row["label"] for row in batch] )
snake_case_ = torch.stack([row["image_start_token"] for row in batch] )
snake_case_ = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
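# --- Standalone sketch of the padding step the collate function above performs: left-align
# each variable-length id sequence into a zero-padded LongTensor and record a 0/1 mask.
# The helper name is illustrative. ---
def _pad_batch(seqs, pad_id=0):
    bsz, max_len = len(seqs), max(len(s) for s in seqs)
    ids = torch.full((bsz, max_len), pad_id, dtype=torch.long)
    mask = torch.zeros(bsz, max_len, dtype=torch.long)
    for i, s in enumerate(seqs):
        ids[i, : len(s)] = torch.tensor(s, dtype=torch.long)
        mask[i, : len(s)] = 1
    return ids, mask


# _pad_batch([[5, 6, 7], [8]]) -> ids [[5, 6, 7], [8, 0, 0]], mask [[1, 1, 1], [1, 0, 0]]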
def lowerCamelCase__ ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase__ ( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 139 | 0 |
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
snake_case__ : Optional[int] = """Create a default config file for Accelerate with only a few flags set."""
def snake_case_ ( _SCREAMING_SNAKE_CASE="no" , _SCREAMING_SNAKE_CASE = default_json_config_file , _SCREAMING_SNAKE_CASE = False ):
__lowercase = Path(lowerCAmelCase__ )
path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
__lowercase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}""" )
__lowercase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__lowercase = torch.cuda.device_count()
__lowercase = num_gpus
__lowercase = False
if num_gpus > 1:
__lowercase = 'MULTI_GPU'
else:
__lowercase = 'NO'
elif is_xpu_available() and use_xpu:
__lowercase = torch.xpu.device_count()
__lowercase = num_xpus
__lowercase = False
if num_xpus > 1:
__lowercase = 'MULTI_XPU'
else:
__lowercase = 'NO'
elif is_npu_available():
__lowercase = torch.npu.device_count()
__lowercase = num_npus
__lowercase = False
if num_npus > 1:
__lowercase = 'MULTI_NPU'
else:
__lowercase = 'NO'
else:
__lowercase = 0
__lowercase = True
__lowercase = 1
__lowercase = 'NO'
__lowercase = ClusterConfig(**lowerCAmelCase__ )
config.to_json_file(lowerCAmelCase__ )
return path
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = parser.add_parser("default" , parents=lowerCAmelCase__ , help=lowerCAmelCase__ , formatter_class=lowerCAmelCase__ )
parser.add_argument(
"--config_file" , default=lowerCAmelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have "
"such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed "
"with \'huggingface\'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowerCAmelCase__ , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 402 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
A = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
A = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
A = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def _lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) ,homepage='https://github.com/hendrycks/math' ,codebase_urls=['https://github.com/hendrycks/math'] ,)
def _lowerCamelCase ( self : int ,UpperCamelCase : int ,UpperCamelCase : Optional[int] ) -> Optional[Any]:
_lowercase : Optional[int] = 0.0
for i, j in zip(UpperCamelCase ,UpperCamelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCamelCase ,UpperCamelCase ) else 0.0
_lowercase : Any = n_correct / len(UpperCamelCase )
return {
"accuracy": accuracy,
} | 125 | 0 |
"""simple docstring"""
_a : Dict = 256
# Modulus to hash a string
_a : str = 1_000_003
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> bool:
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
_lowerCAmelCase : str = len(_lowerCamelCase )
if p_len > t_len:
return False
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_lowerCAmelCase : Tuple = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_lowerCAmelCase : Optional[int] = (modulus_power * alphabet_size) % modulus
for i in range(0 ,t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash):
        # drop text[i]'s contribution, shift by the alphabet size, add text[i + p_len]
_lowerCAmelCase : Optional[int] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
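# --- Numeric check of the rolling-hash recurrence used above: dropping the leading
# character, shifting by the alphabet size, and adding the incoming character reproduces
# the hash computed from scratch on the shifted window. Helper names are illustrative. ---
def _hash_str(s, base=256, mod=1_000_003):
    h = 0
    for ch in s:
        h = (ord(ch) + h * base) % mod
    return h


_text, _p_len, _base, _mod = "abcd", 3, 256, 1_000_003
_power = pow(_base, _p_len - 1, _mod)  # weight of the window's leading character
_h = _hash_str(_text[:_p_len], _base, _mod)
_rolled = ((_h - ord(_text[0]) * _power) * _base + ord(_text[_p_len])) % _mod
assert _rolled == _hash_str(_text[1 : 1 + _p_len], _base, _mod)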
def SCREAMING_SNAKE_CASE ( ) -> None:
_lowerCAmelCase : str = """abc1abc12"""
_lowerCAmelCase : Optional[int] = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
_lowerCAmelCase : Optional[int] = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(_lowerCamelCase ,_lowerCamelCase ) and not rabin_karp(_lowerCamelCase ,_lowerCamelCase )
# Test 2)
_lowerCAmelCase : Optional[int] = """ABABX"""
_lowerCAmelCase : Tuple = """ABABZABABYABABX"""
assert rabin_karp(_lowerCamelCase ,_lowerCamelCase )
# Test 3)
_lowerCAmelCase : Any = """AAAB"""
_lowerCAmelCase : Optional[Any] = """ABAAAAAB"""
assert rabin_karp(_lowerCamelCase ,_lowerCamelCase )
# Test 4)
_lowerCAmelCase : str = """abcdabcy"""
_lowerCAmelCase : Dict = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(_lowerCamelCase ,_lowerCamelCase )
# Test 5)
_lowerCAmelCase : str = """Lü"""
_lowerCAmelCase : int = """Lüsai"""
assert rabin_karp(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : List[str] = """Lue"""
assert not rabin_karp(_lowerCamelCase ,_lowerCamelCase )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 663 | """simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCamelCase ( *lowerCamelCase__ ):
"""simple docstring"""
with open(_UpperCamelCase , "r" ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
lowerCAmelCase__ = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
lowerCAmelCase__ = torch.device('''cuda''', local_rank)
lowerCAmelCase__ = socket.gethostname()
lowerCAmelCase__ = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
lowerCAmelCase__ = dist.get_rank()
lowerCAmelCase__ = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
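# --- Hedged extension sketch: beyond "does all_reduce return", the value itself can be
# checked -- after SUM-reducing a ones tensor, every rank should hold exactly world_size.
# This helper is illustrative only and assumes the process group above is initialized. ---
def check_allreduce_value(dev):
    t = torch.ones(1, device=dev)
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    expected = float(dist.get_world_size())
    assert t.item() == expected, f"all_reduce gave {t.item()}, expected {expected}"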
| 496 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase__ ( A__ ):
def __init__( self : Tuple , *__a : Tuple , __a : Dict=None , __a : List[str]=None , **__a : Dict ):
'''simple docstring'''
super().__init__(*__a , **__a )
lowerCamelCase__: str = eval_examples
lowerCamelCase__: Optional[int] = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 306 | 0 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
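
# Hand-checkable example (added; not part of the original script):
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) returns 26.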
| 701 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for RemBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ) -> None:
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
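
# Minimal usage sketch (illustrative; the local "sentencepiece.model" path is an
# assumption and must point at a real SentencePiece model file):
#   tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#   pieces = tokenizer._tokenize("Hello world")
#   ids = [tokenizer._convert_token_to_id(p) for p in pieces]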
| 496 | 0 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Dynamically pads the received audio features and samples the masked time indices for pretraining."""

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
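    # Worked example of the decay schedule above: with max_gumbel_temp=2.0,
    # gumbel_temp_decay=0.999995 and min_gumbel_temp=0.5, the temperature after
    # 100_000 updates is max(2.0 * 0.999995**100_000, 0.5) ≈ 2.0 * 0.6065 ≈ 1.21.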
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
| 639 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(F"""{solution() = }""")
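
# Hand-checkable example (added; not part of the original script):
# 13195 = 5 * 7 * 13 * 29, so solution(13195) returns 29.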
| 565 | 0 |
"""simple docstring"""
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 701 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 117 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(input_text, predicted_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 347 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's algorithm: grow a minimum spanning tree starting from vertex 0."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
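    # Illustrative session (hypothetical input; each edge line is "u v weight"):
    #   Enter number of edges: 3
    #   0 1 5
    #   1 2 2
    #   0 2 4
    # prints [(0, 2), (2, 1)] -- a minimum spanning tree of total weight 6.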
| 347 | 1 |
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Get a Formatter object from its name (format_type)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None)}, but got '{format_type}'"
        )
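

# Minimal usage sketch (illustrative, not executed here): "np" is registered
# above as an alias of "numpy", so both calls return a NumpyFormatter instance.
#   formatter = get_formatter("np")
#   formatter = get_formatter("numpy")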
| 703 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to pass to the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 412 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
extra_arch = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = ' Hello world! cécé herlolip'

mnli_rename_keys = [
    ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
    ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
    ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
    ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak fairseq BART weights into the transformers BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)

    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
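    # Illustrative invocation (script and output names are hypothetical):
    #   python convert_bart_checkpoint.py bart.large ./bart-large-converted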
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 585 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
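# the helper modules used only for the version checks above are deleted so they
# do not leak into the public `datasets` namespace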
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCAmelCase = concatenate_datasets
__lowerCAmelCase = DownloadConfig
__lowerCAmelCase = DownloadManager
__lowerCAmelCase = DownloadMode
__lowerCAmelCase = DownloadConfig
__lowerCAmelCase = DownloadMode
__lowerCAmelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 585 | 1 |
from __future__ import annotations
import math
class A__ :
def __init__( self : List[Any] , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = size
# approximate the overall size of segment tree with given value
__lowercase = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
__lowercase = [0 for i in range(0 , 4 * size )]
__lowercase = [0 for i in range(0 , 4 * size )] # flag for lazy update
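        # a node whose flag is set carries a pending range assignment in `lazy`
        # that must be pushed to its children before they are read or updated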
def a__ ( self : int , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
return idx * 2
def a__ ( self : str , _UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
return idx * 2 + 1
def a__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : list[int] ) -> Tuple:
"""simple docstring"""
if left_element == right_element:
__lowercase = a[left_element - 1]
else:
__lowercase = (left_element + right_element) // 2
self.build(self.left(__A ) , __A , __A , __A )
self.build(self.right(__A ) , mid + 1 , __A , __A )
__lowercase = max(
self.segment_tree[self.left(__A )] , self.segment_tree[self.right(__A )] )
def a__ ( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
if self.flag[idx] is True:
__lowercase = self.lazy[idx]
__lowercase = False
if left_element != right_element:
__lowercase = self.lazy[idx]
__lowercase = self.lazy[idx]
__lowercase = True
__lowercase = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__lowercase = val
if left_element != right_element:
__lowercase = val
__lowercase = val
__lowercase = True
__lowercase = True
return True
__lowercase = (left_element + right_element) // 2
self.update(self.left(__A ) , __A , __A , __A , __A , __A )
self.update(self.right(__A ) , mid + 1 , __A , __A , __A , __A )
__lowercase = max(
self.segment_tree[self.left(__A )] , self.segment_tree[self.right(__A )] )
return True
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
if self.flag[idx] is True:
__lowercase = self.lazy[idx]
__lowercase = False
if left_element != right_element:
__lowercase = self.lazy[idx]
__lowercase = self.lazy[idx]
__lowercase = True
__lowercase = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__lowercase = (left_element + right_element) // 2
__lowercase = self.query(self.left(__A ) , __A , __A , __A , __A )
__lowercase = self.query(self.right(__A ) , mid + 1 , __A , __A , __A )
return max(__A , __A )
def __str__( self : List[str] ) -> str:
"""simple docstring"""
return str([self.query(1 , 1 , self.size , __A , __A ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
SCREAMING_SNAKE_CASE__ = 15
SCREAMING_SNAKE_CASE__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
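# each optional backend below is probed with try/except so that tokenizer,
# PyTorch, TensorFlow and Flax symbols are only registered when the
# corresponding dependency is actually installed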
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 | 0 |
'''simple docstring'''
__UpperCAmelCase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def _snake_case ( A ) -> str:
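    """
    Convert a base-10 integer to a '0x'-prefixed hexadecimal string.

    >>> _snake_case(5)
    '0x5'
    >>> _snake_case(15)
    '0xf'
    >>> _snake_case(-256)
    '-0x100'
    """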
    assert isinstance(A, (int, float)) and A == int(A)
    decimal = int(A)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__UpperCAmelCase : Dict = None
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Any = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase : Tuple = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase : int = {
    'albert-base-v1': 512,
    'albert-large-v1': 512,
    'albert-xlarge-v1': 512,
    'albert-xxlarge-v1': 512,
    'albert-base-v2': 512,
    'albert-large-v2': 512,
    'albert-xlarge-v2': 512,
    'albert-xxlarge-v2': 512,
}
__UpperCAmelCase : int = '▁'
class __lowerCAmelCase (__UpperCamelCase ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = AlbertTokenizer
def __init__( self , a=None , a=None , a=True , a=True , a=False , a="[CLS]" , a="[SEP]" , a="<unk>" , a="[SEP]" , a="<pad>" , a="[CLS]" , a="[MASK]" , **a , ):
"""simple docstring"""
snake_case_ :Tuple = (
AddedToken(a , lstrip=a , rstrip=a , normalized=a )
if isinstance(a , a )
else mask_token
)
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
snake_case_ :Tuple = do_lower_case
snake_case_ :str = remove_space
snake_case_ :List[str] = keep_accents
snake_case_ :Union[str, Any] = vocab_file
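        # the slow (sentencepiece) tokenizer can only be re-exported when the
        # original vocab file is available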
        snake_case_ :Any = bool(self.vocab_file )
def _a ( self , a , a = None ):
"""simple docstring"""
snake_case_ :List[str] = [self.sep_token_id]
snake_case_ :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self , a , a = None ):
"""simple docstring"""
snake_case_ :Union[str, Any] = [self.sep_token_id]
snake_case_ :str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self , a , a = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ :Dict = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
| 584 | 0 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle( data: list[Any] ) -> list[Any]:
    """Shuffle ``data`` in place by repeatedly swapping two random positions."""
    for _ in range(len(data) ):
        a = random.randint(0 , len(data) - 1 )
        b = random.randint(0 , len(data) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
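# Note: the shuffle above swaps two uniformly random positions on every pass,
# which is simpler than, and not equivalent to, the textbook Fisher-Yates walk.
# A minimal sketch of the classic unbiased variant is shown for comparison
# (the helper name is ours, not part of the original module):
def _textbook_fisher_yates(data: list[Any]) -> list[Any]:
    for i in range(len(data) - 1 , 0 , -1 ):
        j = random.randint(0 , i )  # choose from the not-yet-fixed prefix [0, i]
        data[i], data[j] = data[j], data[i]
    return data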
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 3, 4, 5, 6, 7]
SCREAMING_SNAKE_CASE_ = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 201 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'sentencepiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
SCREAMING_SNAKE_CASE_ = {
    'google/rembert': 256,
}
class lowerCAmelCase_ ( snake_case__ ):
"""simple docstring"""
a_ :Tuple =VOCAB_FILES_NAMES
a_ :Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ :Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : int="[CLS]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Optional[Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Any="[SEP]" , SCREAMING_SNAKE_CASE__ : Dict="[PAD]" , SCREAMING_SNAKE_CASE__ : Dict="[CLS]" , SCREAMING_SNAKE_CASE__ : Dict="[MASK]" , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = spm.SentencePieceProcessor()
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __a ( self : List[str] ):
'''simple docstring'''
return len(self.sp_model )
def __a ( self : str ):
'''simple docstring'''
__a = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
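        # drop the SentencePiece processor before pickling: it is not picklable
        # and is re-created from `vocab_file` in __setstate__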
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a = d
__a = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str]=False ):
'''simple docstring'''
__a = self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE__ )
return pieces
def __a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def __a ( self : List[str] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a = self.sp_model.decode_pieces(SCREAMING_SNAKE_CASE__ )
return out_string
def __a ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) )
return
__a = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 201 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase_ = get_logger(__name__)
class _UpperCamelCase :
'''simple docstring'''
_A = "dummy_data"
_A = "datasets"
_A = False
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[Version, str] , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[List[Callable]] = None , ):
_a = 0
_a = dataset_name
_a = cache_dir
_a = use_local_dummy_data
_a = config
# download_callbacks take a single url as input
_a = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_a = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_a = str(SCREAMING_SNAKE_CASE_ )
# to be downloaded
_a = None
_a = None
@property
def _UpperCAmelCase ( self : Dict ):
if self._dummy_file is None:
_a = self.download_dummy_data()
return self._dummy_file
@property
def _UpperCAmelCase ( self : Optional[Any] ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def _UpperCAmelCase ( self : Any ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def _UpperCAmelCase ( self : Optional[Any] ):
_a = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_a = cached_path(
SCREAMING_SNAKE_CASE_ , cache_dir=self.cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ , force_extract=SCREAMING_SNAKE_CASE_ )
return os.path.join(SCREAMING_SNAKE_CASE_ , self.dummy_file_name )
@property
def _UpperCAmelCase ( self : str ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _UpperCAmelCase ( self : Any ):
if self._bucket_url is None:
_a = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def _UpperCAmelCase ( self : str ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def _UpperCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , *SCREAMING_SNAKE_CASE_ : int ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_a = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_a = self.dummy_file_name
# special case when data_url is a dict
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return self.create_dummy_data_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
return self.create_dummy_data_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
return self.create_dummy_data_single(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , *SCREAMING_SNAKE_CASE_ : Optional[int] ):
return self.download_and_extract(SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return self.download_and_extract(SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ):
return path
def _UpperCAmelCase ( self : int ):
return {}
def _UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ):
_a = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for single_url in single_urls:
download_callback(SCREAMING_SNAKE_CASE_ )
else:
_a = single_urls
download_callback(SCREAMING_SNAKE_CASE_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_a = [os.path.join(SCREAMING_SNAKE_CASE_ , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE_ ).name ) ) for x in single_urls]
else:
_a = single_urls
_a = os.path.join(SCREAMING_SNAKE_CASE_ , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE_ ).name ) )
_a = value
# make sure that values are unique
if all(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_a = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int ):
_a = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_a = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , SCREAMING_SNAKE_CASE_ ) ) for url in data_url )
_a = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_a = [data_url[0]] * len(SCREAMING_SNAKE_CASE_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(SCREAMING_SNAKE_CASE_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_a = os.path.join(SCREAMING_SNAKE_CASE_ , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(SCREAMING_SNAKE_CASE_ )
return dummy_data_list
def _UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
for download_callback in self.download_callbacks:
download_callback(SCREAMING_SNAKE_CASE_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_a = os.path.join(SCREAMING_SNAKE_CASE_ , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(SCREAMING_SNAKE_CASE_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _UpperCAmelCase ( self : Dict ):
pass
def _UpperCAmelCase ( self : Union[str, Any] ):
pass
def _UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
def _iter_archive_members(SCREAMING_SNAKE_CASE_ : Tuple ):
# this preserves the order of the members inside the ZIP archive
_a = Path(self.dummy_file ).parent
_a = path.relative_to(SCREAMING_SNAKE_CASE_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_a = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(SCREAMING_SNAKE_CASE_ )
_a = Path(SCREAMING_SNAKE_CASE_ )
_a = _iter_archive_members(SCREAMING_SNAKE_CASE_ ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(SCREAMING_SNAKE_CASE_ ).as_posix(), file_path.open('rb' )
def _UpperCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_a = [paths]
for path in paths:
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
if os.path.basename(SCREAMING_SNAKE_CASE_ ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(SCREAMING_SNAKE_CASE_ ):
if os.path.basename(SCREAMING_SNAKE_CASE_ ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(SCREAMING_SNAKE_CASE_ ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 562 |
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase_ = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowercase_ = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self : Union[str, Any] ):
_a = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_a = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_a = {'BertModelTest': 'BertModelTester'}
_a = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Union[str, Any] ):
_a = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
_a = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
_a = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
_a = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : List[Any] ):
_a = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_a = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_a = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
_a = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
| 562 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowerCamelCase__ :
"""simple docstring"""
__a = BlenderbotConfig
__a = {}
__a = """gelu"""
def __init__( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=13 , UpperCamelCase : str=7 , UpperCamelCase : int=True , UpperCamelCase : Tuple=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : int=32 , UpperCamelCase : Dict=2 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : int=37 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : int=2 , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : List[str]=0 , ):
'''simple docstring'''
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : Tuple = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : Tuple = eos_token_id
__UpperCAmelCase : List[str] = pad_token_id
__UpperCAmelCase : Dict = bos_token_id
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCAmelCase : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCAmelCase : int = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase : str = prepare_blenderbot_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, inputs_dict
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = TFBlenderbotModel(config=UpperCamelCase ).get_decoder()
__UpperCAmelCase : Optional[Any] = inputs_dict["""input_ids"""]
__UpperCAmelCase : Dict = input_ids[:1, :]
__UpperCAmelCase : Optional[int] = inputs_dict["""attention_mask"""][:1, :]
__UpperCAmelCase : str = inputs_dict["""head_mask"""]
__UpperCAmelCase : Tuple = 1
# first forward pass
__UpperCAmelCase : int = model(UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase , use_cache=UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__UpperCAmelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCAmelCase : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCAmelCase : Dict = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
__UpperCAmelCase : int = model(UpperCamelCase , attention_mask=UpperCamelCase , past_key_values=UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCAmelCase : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCAmelCase : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase , UpperCamelCase , rtol=1e-3 )
def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str]=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : str=None , ) -> List[Any]:
'''simple docstring'''
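    # build any mask the caller did not supply: padding-based attention masks
    # (the decoder mask always attends to the first token) and all-ones head masks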
if attention_mask is None:
__UpperCAmelCase : Tuple = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCAmelCase : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCAmelCase : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase__ ( A , A , unittest.TestCase ):
"""simple docstring"""
__a = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__a = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__a = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__a = True
__a = False
__a = False
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = TFBlenderbotModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase )
@require_tokenizers
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__a = ["""My friends are cool but they eat too many carbs."""]
__a = """facebook/blenderbot-400M-distill"""
@cached_property
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , return_tensors="""tf""" )
__UpperCAmelCase : str = self.model.generate(
model_inputs.input_ids , )
__UpperCAmelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 299 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
UpperCAmelCase : str = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
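    # map each (zero-based) line number to the first whitespace-separated token
    # on that line; used later as the id2label mapping for sequence classification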
__UpperCAmelCase : Any = {}
with open(_UpperCamelCase , """r""" ) as file:
for line_number, line in enumerate(_UpperCamelCase ):
__UpperCAmelCase : List[Any] = line.strip()
if line:
__UpperCAmelCase : List[Any] = line.split()
__UpperCAmelCase : List[str] = line_number
__UpperCAmelCase : List[str] = words[0]
__UpperCAmelCase : Dict = value
return result
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCAmelCase : Any = getattr(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_UpperCamelCase ):
__UpperCAmelCase : Optional[int] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCAmelCase : Tuple = """param"""
if weight_type is not None and weight_type != "param":
__UpperCAmelCase : Dict = getattr(_UpperCamelCase , _UpperCamelCase ).shape
elif weight_type is not None and weight_type == "param":
__UpperCAmelCase : Any = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__UpperCAmelCase : Union[str, Any] = getattr(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : Optional[Any] = shape_pointer.shape
# let's reduce dimension
__UpperCAmelCase : Dict = value[0]
else:
__UpperCAmelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__UpperCAmelCase : int = value
elif weight_type == "weight_g":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "weight_v":
__UpperCAmelCase : int = value
elif weight_type == "bias":
__UpperCAmelCase : Any = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__UpperCAmelCase : Dict = getattr(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : List[str] = value
else:
__UpperCAmelCase : List[Any] = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : str ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_UpperCamelCase ):
__UpperCAmelCase : Tuple = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCAmelCase : int = """param"""
if weight_type is not None and weight_type != "param":
__UpperCAmelCase : Optional[int] = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCAmelCase : Optional[Any] = """.""".join([key, hf_param_name] )
else:
__UpperCAmelCase : List[str] = key
__UpperCAmelCase : Tuple = value if """lm_head""" in full_key else value[0]
UpperCAmelCase : Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Any=None , _UpperCamelCase : str=None ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[Any] = False
for key, mapped_key in MAPPING.items():
__UpperCAmelCase : Optional[Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCAmelCase : Dict = True
if "*" in mapped_key:
__UpperCAmelCase : str = name.split(_UpperCamelCase )[0].split(""".""" )[-2]
__UpperCAmelCase : Dict = mapped_key.replace("""*""" , _UpperCamelCase )
if "weight_g" in name:
__UpperCAmelCase : List[Any] = """weight_g"""
elif "weight_v" in name:
__UpperCAmelCase : List[str] = """weight_v"""
elif "bias" in name:
__UpperCAmelCase : Any = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCAmelCase : Dict = """weight"""
else:
__UpperCAmelCase : Optional[Any] = None
if hf_dict is not None:
rename_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return is_used
return is_used
def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : str ) -> Any:
'''simple docstring'''
__UpperCAmelCase : str = []
__UpperCAmelCase : Dict = fairseq_model.state_dict()
__UpperCAmelCase : Optional[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__UpperCAmelCase : List[Any] = True
else:
__UpperCAmelCase : Any = load_wavaveca_layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = full_name.split("""conv_layers.""" )[-1]
__UpperCAmelCase : Dict = name.split(""".""" )
__UpperCAmelCase : int = int(items[0] )
__UpperCAmelCase : int = int(items[1] )
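    # fairseq conv parameter names look like 'conv_layers.<layer_id>.<type_id>.<param>';
    # type_id 0 addresses the conv weight/bias, type_id 2 the (group/layer) norm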
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__UpperCAmelCase : int = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Dict=False ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
__UpperCAmelCase : List[str] = WavaVecaConfig.from_pretrained(_UpperCamelCase )
else:
__UpperCAmelCase : List[Any] = WavaVecaConfig()
if is_seq_class:
__UpperCAmelCase : List[Any] = read_txt_into_dict(_UpperCamelCase )
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Dict = WavaVecaForSequenceClassification(_UpperCamelCase )
__UpperCAmelCase : Dict = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
feature_extractor.save_pretrained(_UpperCamelCase )
elif is_finetuned:
if dict_path:
__UpperCAmelCase : Union[str, Any] = Dictionary.load(_UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCAmelCase : Optional[Any] = target_dict.pad_index
__UpperCAmelCase : Union[str, Any] = target_dict.bos_index
__UpperCAmelCase : Optional[int] = target_dict.eos_index
__UpperCAmelCase : str = len(target_dict.symbols )
__UpperCAmelCase : List[Any] = os.path.join(_UpperCamelCase , """vocab.json""" )
if not os.path.isdir(_UpperCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
__UpperCAmelCase : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Union[str, Any] = 1
with open(_UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : int = WavaVecaCTCTokenizer(
_UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_UpperCamelCase , )
__UpperCAmelCase : Union[str, Any] = True if config.feat_extract_norm == """layer""" else False
__UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
__UpperCAmelCase : Any = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
__UpperCAmelCase : int = WavaVecaForCTC(_UpperCamelCase )
else:
__UpperCAmelCase : str = WavaVecaForPreTraining(_UpperCamelCase )
if is_finetuned or is_seq_class:
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__UpperCAmelCase : Tuple = argparse.Namespace(task="""audio_pretraining""" )
__UpperCAmelCase : Dict = fairseq.tasks.setup_task(_UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = model[0].eval()
recursively_load_weights(_UpperCamelCase , _UpperCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCAmelCase : Union[str, Any] = parser.parse_args()
UpperCAmelCase : Optional[int] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 299 | 1 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowercase__ =logging.getLogger(__name__)
class a_ ( __UpperCAmelCase ):
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ):
a_ = self.layer[current_layer](snake_case__ , snake_case__ , head_mask[current_layer] )
a_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __UpperCAmelCase , )
class a_ ( __UpperCAmelCase ):
def __init__( self , UpperCAmelCase ):
super().__init__(snake_case__ )
a_ = BertEncoderWithPabee(snake_case__ )
self.init_weights()
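        # running statistics for patience-based early exit, used to report the
        # average number of encoder layers actually executed at inference time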
a_ = 0
a_ = 0
a_ = 0
a_ = 0
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = threshold
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = patience
def lowerCAmelCase__ ( self ):
a_ = 0
a_ = 0
def lowerCAmelCase__ ( self ):
a_ = self.inference_layers_num / self.inference_instances_num
a_ = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(snake_case__ )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("""You have to specify either input_ids or inputs_embeds""")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ',
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)] )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
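# Minimal usage sketch (the base checkpoint below is illustrative; any BERT checkpoint works):
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
#   model.bert.set_patience(3)  # exit early once 3 consecutive layers agree on the prediction
#   model.eval()
#   logits = model(input_ids=input_ids, attention_mask=attention_mask)[0]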
| 263 |
"""simple docstring"""
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    print('''moving disk from''', fp, '''to''', tp)


def main() -> None:
    height = int(input('''Height of hanoi: ''').strip())
    move_tower(height, '''A''', '''B''', '''C''')
if __name__ == "__main__":
main()
| 677 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
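# Minimal usage sketch (checkpoint id taken from PRETRAINED_VOCAB_FILES_MAP above):
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("It was a bright cold day in April.").input_ids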
| 662 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        '''simple docstring'''
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description="""Evaluation""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size)))

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        '''simple docstring'''
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description="""Prediction""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size)))

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, """predict""")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 12 |
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    '''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
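# e.g. ohms_law(voltage=10, current=5, resistance=0) -> {"resistance": 2.0}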
if __name__ == "__main__":
import doctest
doctest.testmod()
| 119 | 0 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    '''simple docstring'''
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
return total
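# Known result for Project Euler 36: solution(1_000_000) == 872187.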
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 719 | '''simple docstring'''
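# subset[i][j] is True iff some subset of the first i array elements sums exactly to j
# (classic 0/1 subset-sum dynamic programming).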
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    '''simple docstring'''
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 461 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__A : Union[str, Any] = TypeVar("KEY")
__A : Union[str, Any] = TypeVar("VAL")
@dataclass(frozen=_SCREAMING_SNAKE_CASE ,slots=_SCREAMING_SNAKE_CASE)
class __snake_case ( Generic[KEY, VAL]):
"""simple docstring"""
lowercase = 42
lowercase = 42
class __snake_case ( _Item):
"""simple docstring"""
    def __init__(self) -> None:
        super().__init__(None, None)
def __bool__( self : Tuple ) -> bool:
return False
__A : Any = _DeletedItem()
class __snake_case ( MutableMapping[KEY, VAL]):
"""simple docstring"""
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
def __lowercase ( self : List[Any] , lowerCamelCase : KEY ) -> int:
return hash(lowerCamelCase ) % len(self._buckets )
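    # Open addressing: on a collision, probe the next bucket (with wrap-around)
    # until a free slot, a tombstone, or the matching key is found.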
def __lowercase ( self : Optional[Any] , lowerCamelCase : int ) -> int:
return (ind + 1) % len(self._buckets )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : KEY , lowerCamelCase : VAL ) -> bool:
lowerCAmelCase_ : Union[str, Any] = self._buckets[ind]
if not stored:
lowerCAmelCase_ : List[Any] = _Item(lowerCamelCase , lowerCamelCase )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase_ : List[str] = _Item(lowerCamelCase , lowerCamelCase )
return True
else:
return False
def __lowercase ( self : Optional[Any] ) -> bool:
lowerCAmelCase_ : str = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase )
def __lowercase ( self : Dict ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __lowercase ( self : Tuple , lowerCamelCase : int ) -> None:
lowerCAmelCase_ : Union[str, Any] = self._buckets
lowerCAmelCase_ : str = [None] * new_size
lowerCAmelCase_ : str = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __lowercase ( self : Tuple ) -> None:
self._resize(len(self._buckets ) * 2 )
def __lowercase ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __lowercase ( self : Optional[int] , lowerCamelCase : KEY ) -> Iterator[int]:
lowerCAmelCase_ : int = self._get_bucket_index(lowerCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase_ : Dict = self._get_next_ind(lowerCamelCase )
def __lowercase ( self : Tuple , lowerCamelCase : KEY , lowerCamelCase : VAL ) -> None:
for ind in self._iterate_buckets(lowerCamelCase ):
if self._try_set(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
break
def __setitem__( self : Tuple , lowerCamelCase : KEY , lowerCamelCase : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase , lowerCamelCase )
def __delitem__( self : Optional[int] , lowerCamelCase : KEY ) -> None:
for ind in self._iterate_buckets(lowerCamelCase ):
lowerCAmelCase_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase_ : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Any , lowerCamelCase : KEY ) -> VAL:
for ind in self._iterate_buckets(lowerCamelCase ):
lowerCAmelCase_ : Dict = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase )
def __len__( self : Any ) -> int:
return self._len
def __iter__( self : Optional[Any] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[Any] ) -> str:
lowerCAmelCase_ : int = """ ,""".join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 275 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'roberta-prelayernorm'
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
@property
def __lowercase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 275 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
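# _LazyModule uses this map to defer importing the torch-heavy modeling code
# until one of these attributes is first accessed.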
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_van'''] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 468 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta'''] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta_fast'''] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta'''] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm_roberta'''] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xlm_roberta'''] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 468 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__( self : Any , _lowercase : int = 1 , _lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase : float = 0.0 , _lowercase : int = 50 , _lowercase : Optional[bool] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
SCREAMING_SNAKE_CASE__ : Dict = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
SCREAMING_SNAKE_CASE__ : List[str] = randn_tensor(_lowercase , generator=_lowercase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE__ : str = self.unet(_lowercase , _lowercase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : int = self.scheduler.step(
_lowercase , _lowercase , _lowercase , eta=_lowercase , use_clipped_model_output=_lowercase , generator=_lowercase ).prev_sample
SCREAMING_SNAKE_CASE__ : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
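# Minimal usage sketch (the checkpoint id below is only illustrative, not guaranteed by this file):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images = pipe(batch_size=4, num_inference_steps=50, eta=0.0).images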
| 35 |
def or_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 306 | 0 |
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__(self, data: Any):
        self.data = data
        self.next = None
def __repr__( self: int ) -> str:
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self: int ) -> int:
return sum(1 for _ in self )
    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])
def __getitem__( self: int , __A: int ) -> Any:
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: Tuple , __A: int , __A: Any ) -> None:
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
_A = self.head
for _ in range(__A ):
_A = current.next
_A = data
def __A ( self: int , __A: Any ) -> None:
self.insert_nth(len(self ) , __A )
def __A ( self: List[str] , __A: Any ) -> None:
self.insert_nth(0 , __A )
def __A ( self: str , __A: int , __A: Any ) -> None:
if not 0 <= index <= len(self ):
raise IndexError('''list index out of range''' )
_A = Node(__A )
if self.head is None:
_A = new_node
elif index == 0:
_A = self.head # link new_node to head
_A = new_node
else:
_A = self.head
for _ in range(index - 1 ):
_A = temp.next
_A = temp.next
_A = new_node
def __A ( self: Union[str, Any] ) -> None: # print every node data
print(self )
def __A ( self: Dict ) -> Any:
return self.delete_nth(0 )
def __A ( self: Tuple ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def __A ( self: Optional[Any] , __A: int = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('''List index out of range.''' )
_A = self.head # default first node
if index == 0:
_A = self.head.next
else:
_A = self.head
for _ in range(index - 1 ):
_A = temp.next
_A = temp.next
_A = temp.next.next
return delete_node.data
def __A ( self: Any ) -> bool:
return self.head is None
def __A ( self: Any ) -> None:
_A = None
_A = self.head
while current:
# Store the current node's next node.
_A = current.next
# Make the current node's next point backwards
_A = prev
# Make the previous node be the current node
_A = current
# Make the current node the next node (to progress iteration)
_A = next_node
# Return prev in order to put the head at the end
_A = prev
def __A ( ):
'''simple docstring'''
_A = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_A = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def __A ( ):
'''simple docstring'''
_A = [
-9,
1_00,
Node(77_34_51_12 ),
'''dlrow olleH''',
7,
55_55,
0,
-1_92.5_55_55,
'''Hello, world!''',
77.9,
Node(10 ),
None,
None,
12.20,
]
_A = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_A = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_A = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_A = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('''Hello again, world!''' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __A ( ):
'''simple docstring'''
from doctest import testmod
testmod()
_A = LinkedList()
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() )
linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() )
linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nDelete head''' )
linked_list.delete_head()
print('''Delete tail''' )
linked_list.delete_tail()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nReverse linked list''' )
linked_list.reverse()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nString representation of linked list:''' )
print(_lowercase )
print('''\nReading/changing Node data using indexing:''' )
print(f"""Element at Position 1: {linked_list[1]}""" )
_A = input('''Enter New Value: ''' ).strip()
print('''New list:''' )
print(_lowercase )
print(f"""length of linked_list is : {len(_lowercase )}""" )
if __name__ == "__main__":
main()
| 62 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__A = '\\n Text data.\n Second line of data.'
__A = 'file'
@pytest.fixture(scope='''session''' )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_A = bytes(_lowercase , '''utf-8''' )
with zstd.open(_lowercase , '''wb''' ) as f:
f.write(_lowercase )
return path
@pytest.fixture
def __A ( _lowercase ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f:
f.write(_lowercase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_A = input_paths[compression_format]
_A = tmp_path / '''cache'''
_A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase )
_A = cached_path(_lowercase , download_config=_lowercase )
with open(_lowercase ) as f:
_A = f.read()
with open(_lowercase ) as f:
_A = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = '''custom_cache'''
_A = '''custom_extracted_dir'''
_A = tmp_path / '''custom_extracted_path'''
if default_extracted:
_A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) )
_A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_A = xz_file
_A = (
DownloadConfig(extract_compressed_file=_lowercase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase )
)
_A = cached_path(_lowercase , download_config=_lowercase )
assert Path(_lowercase ).parent.parts[-2:] == expected
def __A ( _lowercase ):
'''simple docstring'''
_A = str(Path(_lowercase ).resolve() )
assert cached_path(_lowercase ) == text_file
# relative path
_A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowercase ) == text_file
def __A ( _lowercase ):
'''simple docstring'''
_A = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_lowercase ):
cached_path(_lowercase )
# relative path
_A = '''./__missing_file__.txt'''
with pytest.raises(_lowercase ):
cached_path(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(_lowercase ) as f:
_A = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( ):
'''simple docstring'''
with pytest.raises(_lowercase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
http_get('''https://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
fsspec_head('''s3://huggingface.co''' )
| 62 | 1 |
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
return max_prod
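# e.g. max_product_subarray([2, 3, -2, 4]) == 6  (the subarray [2, 3])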
| 361 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    '''simple docstring'''
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            f"""{test_file} instead.""")
    test_fn = components[-1]
    if not test_fn.endswith('''py'''):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""")
    if not test_fn.startswith('''test_modeling_'''):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""")
    components = components[:-1] + [test_fn.replace('''.py''', '''''')]
    test_module_path = '''.'''.join(components)
    return test_module_path
def get_test_module(test_file):
    '''simple docstring'''
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def lowerCamelCase__ ( A : Tuple ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = get_test_module(A )
for attr in dir(A ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(A , A ) )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : Any ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = get_test_module(A )
for attr in dir(A ):
UpperCAmelCase = getattr(A , A )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCAmelCase = getattr(A , '''all_model_classes''' , [] )
if len(A ) > 0:
test_classes.append(A )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A )
UpperCAmelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = test_class()
if hasattr(A , '''setUp''' ):
test.setUp()
UpperCAmelCase = None
if hasattr(A , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCAmelCase = test.model_tester.__class__
return model_tester
def lowerCamelCase__ ( A : Tuple , A : int ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A )
UpperCAmelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(A )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : Any , A : Tuple ):
'''simple docstring'''
UpperCAmelCase = get_test_classes_for_model(A , A )
UpperCAmelCase = []
for test_class in test_classes:
UpperCAmelCase = get_model_tester_from_test_class(A )
if tester_class is not None:
tester_classes.append(A )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A )
UpperCAmelCase = {test_class: get_model_tester_from_test_class(A ) for test_class in test_classes}
return test_tester_mapping
def lowerCamelCase__ ( A : Any ):
'''simple docstring'''
UpperCAmelCase = get_model_classes(A )
UpperCAmelCase = {
model_class: get_test_classes_for_model(A , A ) for model_class in model_classes
}
return model_test_mapping
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = get_model_classes(A )
UpperCAmelCase = {
model_class: get_tester_classes_for_model(A , A ) for model_class in model_classes
}
return model_to_tester_mapping
def lowerCamelCase__ ( A : Dict ):
'''simple docstring'''
if isinstance(A , A ):
return o
elif isinstance(A , A ):
return o.__name__
elif isinstance(A , (list, tuple) ):
return [to_json(A ) for x in o]
elif isinstance(A , A ):
return {to_json(A ): to_json(A ) for k, v in o.items()}
else:
return o
| 210 | 0 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
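# Greedy best-first search always expands the open node with the smallest heuristic
# (Manhattan distance to the goal); g_cost is tracked but, unlike A*, not used for ordering.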
class Node:
    '''simple docstring'''

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    '''simple docstring'''

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                ))
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("""------""")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| 428 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__a: Dict = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Tuple = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
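
# A minimal usage sketch of the lazily exposed symbols (assumes `torch` and
# `sentencepiece` are installed; the checkpoint name is an illustrative assumption):
#
#     from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")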
| 428 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after a `shortest_edge` resize."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(a , batched=a )
__lowerCamelCase = image_processing(a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(a , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(a , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
__lowerCamelCase = DetaImageProcessor()
__lowerCamelCase = image_processing(images=a , annotations=a , return_tensors='''pt''' )
# verify pixel values
__lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , a )
__lowerCamelCase = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
__lowerCamelCase = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , a ) )
# verify boxes
__lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , a )
__lowerCamelCase = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , a , atol=1e-3 ) )
# verify image_id
__lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , a ) )
# verify is_crowd
__lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , a ) )
# verify class_labels
__lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , a ) )
# verify orig_size
__lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , a ) )
# verify size
__lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , a ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
__lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__lowerCamelCase = DetaImageProcessor(format='''coco_panoptic''' )
__lowerCamelCase = image_processing(images=a , annotations=a , masks_path=a , return_tensors='''pt''' )
# verify pixel values
__lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , a )
__lowerCamelCase = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
__lowerCamelCase = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , a ) )
# verify boxes
__lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , a )
__lowerCamelCase = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , a , atol=1e-3 ) )
# verify image_id
__lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , a ) )
# verify is_crowd
__lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , a ) )
# verify class_labels
__lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , a ) )
# verify masks
__lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , a )
# verify orig_size
__lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , a ) )
# verify size
__lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , a ) )
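
# A minimal usage sketch of the processor exercised above (the local image path is
# an illustrative assumption, not part of this test suite):
#
#     from PIL import Image
#     from transformers import DetaImageProcessor
#
#     image_processor = DetaImageProcessor()
#     inputs = image_processor(images=Image.open("cats.png"), return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # (1, 3, H, W) after shortest-edge resize + padding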
| 546 | '''simple docstring'''
from manim import *
class ModelOffloadAnimation(Scene):  # NOTE: class name reconstructed; the original name is unknown
    def construct(self):  # manim's entry point for building the scene
__lowerCamelCase = Rectangle(height=0.5 , width=0.5 )
__lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCamelCase = Rectangle(height=0.25 , width=0.25 )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = VGroup(a , a ).arrange(a , buff=0 )
__lowerCamelCase = Text('''CPU''' , font_size=24 )
__lowerCamelCase = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a )
__lowerCamelCase = [mem.copy() for i in range(4 )]
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = Text('''GPU''' , font_size=24 )
__lowerCamelCase = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
gpu.move_to([-1, -1, 0] )
self.add(a )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = Text('''Model''' , font_size=24 )
__lowerCamelCase = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
model.move_to([3, -1.0, 0] )
self.add(a )
__lowerCamelCase = []
__lowerCamelCase = []
for i, rect in enumerate(a ):
__lowerCamelCase = fill.copy().set_fill(a , opacity=0.8 )
target.move_to(a )
model_arr.append(a )
__lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(a )
self.add(*a , *a )
__lowerCamelCase = [meta_mem.copy() for i in range(6 )]
__lowerCamelCase = [meta_mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = VGroup(a , a ).arrange(a , buff=0 )
__lowerCamelCase = Text('''Disk''' , font_size=24 )
__lowerCamelCase = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
disk.move_to([-4, -1.25, 0] )
self.add(a , a )
__lowerCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCamelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a , a )
__lowerCamelCase = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a )
__lowerCamelCase = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a ) )
__lowerCamelCase = Square(0.3 )
input.set_fill(a , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , a , buff=0.5 )
self.play(Write(a ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=a , buff=0.02 )
self.play(MoveToTarget(a ) )
self.play(FadeOut(a ) )
__lowerCamelCase = Arrow(start=a , end=a , color=a , buff=0.5 )
a.next_to(model_arr[0].get_left() , a , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__lowerCamelCase = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) )
__lowerCamelCase = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(a ) , Circumscribe(model_arr[0] , color=a , **a ) , Circumscribe(model_cpu_arr[0] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__lowerCamelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , a , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__lowerCamelCase = AnimationGroup(
FadeOut(a , run_time=0.5 ) , MoveToTarget(a , run_time=0.5 ) , FadeIn(a , run_time=0.5 ) , lag_ratio=0.2 )
self.play(a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__lowerCamelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **a ) , Circumscribe(cpu_left_col_base[i] , **a ) , Circumscribe(cpu_left_col_base[i + 1] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , Circumscribe(model_arr[i + 1] , color=a , **a ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=a , **a ) , Circumscribe(cpu_left_col_base[-1] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__lowerCamelCase = a_c
__lowerCamelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(a ) , FadeOut(a , run_time=0.5 ) , )
__lowerCamelCase = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) , MoveToTarget(a ) )
self.wait()
| 546 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 93 | """simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
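
    # A sketch of the dynamically imported helper above (modeled on the fairseq
    # implementation -- an assumption; the real `utils.label_smoothed_nll_loss` may differ):
    #
    #     def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    #         if target.dim() == lprobs.dim() - 1:
    #             target = target.unsqueeze(-1)
    #         nll_loss = -lprobs.gather(dim=-1, index=target)
    #         smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    #         pad_mask = target.eq(ignore_index)
    #         nll_loss.masked_fill_(pad_mask, 0.0)
    #         smooth_loss.masked_fill_(pad_mask, 0.0)
    #         nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    #         eps_i = epsilon / lprobs.size(-1)
    #         return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss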
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor | 93 | 1 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
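
# Quick sanity check (illustrative; exact values follow from the cosine formula above):
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,) and betas.min() > 0 and betas.max() <= 0.999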
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun (2nd order) discrete scheduler, following Algorithm 2 of Karras et al. (2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        """Scale the model input by 1 / sqrt(sigma**2 + 1) to match the Karras ODE formulation."""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Construct the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self) -> int:
        return self.config.num_train_timesteps
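
# A minimal denoising-loop sketch (illustrative: `unet` stands in for any noise
# prediction model with a `(sample, t) -> noise` interface; shapes are assumptions):
#
#     scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=25)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample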
| 104 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128,
        has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
        max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True,
        input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size,
            hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
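
# A minimal export-prep sketch (illustrative assumptions: the processor checkpoint
# and the chosen task):
#
#     from transformers import LayoutLMv3Processor
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#     onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#     dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework="pt")
#     print({name: tensor.shape for name, tensor in dummy_inputs.items()})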
| 104 | 1 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place in a single pass (Dijkstra's three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    # Invariant: sequence[:low] is all red, sequence[low:mid] all white,
    # sequence[high + 1:] all blue; sequence[mid:high + 1] is still unclassified.
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F"{dutch_national_flag_sort(unsorted)}")
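
# Design note: this is Dijkstra's three-way partition, so it runs in O(n) time and
# O(1) extra space in a single pass. For example:
#
#     >>> dutch_national_flag_sort([2, 0, 1, 0, 2, 1])
#     [0, 0, 1, 1, 2, 2]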
| 714 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : str = config.context_length + max(config.lags_sequence )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase : Dict = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase : Tuple = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase : str = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase : Tuple = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase : Optional[int] = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.get_config()
__lowerCAmelCase : Tuple = self.prepare_autoformer_inputs_dict(A_ )
return config, inputs_dict
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : int = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase__ ( self , A_ , A_ ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = AutoformerModel(config=A_ ).to(A_ ).eval()
__lowerCAmelCase : Optional[int] = model(**A_ )
__lowerCAmelCase : Dict = outputs.encoder_last_hidden_state
__lowerCAmelCase : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Tuple = model.get_encoder()
encoder.save_pretrained(A_ )
__lowerCAmelCase : List[Any] = AutoformerEncoder.from_pretrained(A_ ).to(A_ )
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Any = model.create_network_inputs(**A_ )
__lowerCAmelCase, __lowerCAmelCase : Dict = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase : Dict = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase : Optional[Any] = encoder(inputs_embeds=A_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase : List[Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase : List[str] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase : Dict = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase : str = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Dict = model.get_decoder()
decoder.save_pretrained(A_ )
__lowerCAmelCase : Any = AutoformerDecoder.from_pretrained(A_ ).to(A_ )
__lowerCAmelCase : List[str] = decoder(
trend=A_ , inputs_embeds=A_ , encoder_hidden_states=A_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[Any] = model_class(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = model_class.from_pretrained(A_ , output_loading_info=A_ )
self.assertEqual(info['''missing_keys'''] , [] )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A_ )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
pass
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = inspect.signature(getattr(A_ , '''forward''' ) )
# The main input is the name of the argument after `self`
__lowerCAmelCase : Optional[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : str = model_class(A_ )
__lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__lowerCAmelCase : str = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(A_ )] , A_ )
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Tuple = getattr(self.model_tester , '''seq_length''' , A_ )
__lowerCAmelCase : Tuple = getattr(self.model_tester , '''decoder_seq_length''' , A_ )
__lowerCAmelCase : Any = getattr(self.model_tester , '''encoder_seq_length''' , A_ )
__lowerCAmelCase : List[str] = getattr(self.model_tester , '''d_model''' , A_ )
__lowerCAmelCase : int = getattr(self.model_tester , '''num_attention_heads''' , A_ )
__lowerCAmelCase : Union[str, Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase : Any = True
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[int] = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase : str = model(**self._prepare_for_class(A_ , A_ ) )
__lowerCAmelCase : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Union[str, Any] = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase : Any = model(**self._prepare_for_class(A_ , A_ ) )
__lowerCAmelCase : Dict = outputs.encoder_attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase : Optional[Any] = len(A_ )
__lowerCAmelCase : List[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(A_ , A_ )
# decoder attentions
__lowerCAmelCase : List[str] = outputs.decoder_attentions
self.assertIsInstance(A_ , (list, tuple) )
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase : Any = outputs.cross_attentions
self.assertIsInstance(A_ , (list, tuple) )
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Dict = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(A_ , A_ ) )
self.assertEqual(out_len + 2 , len(A_ ) )
__lowerCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def _lowercase ( lowercase__="train-batch.pt" ):
__lowerCAmelCase : List[str] = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowercase__ , repo_type='''dataset''' )
__lowerCAmelCase : Tuple = torch.load(lowercase__ , map_location=lowercase__ )
return batch
@require_torch
@slow
class __lowercase (unittest.TestCase ):
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
__lowerCAmelCase : Tuple = prepare_batch()
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
__lowerCAmelCase : Dict = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A_ )
__lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=A_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , A_ , atol=A_ ) )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Tuple = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
__lowerCAmelCase : Dict = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__lowerCAmelCase : List[Any] = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
__lowerCAmelCase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A_ )
__lowerCAmelCase : Any = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=A_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , A_ , atol=A_ ) )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Dict = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
__lowerCAmelCase : Dict = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
__lowerCAmelCase : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A_ )
__lowerCAmelCase : Optional[Any] = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=A_ )
__lowerCAmelCase : Tuple = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A_ , rtol=1e-1 ) )
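# A minimal, dependency-free sketch of the signature-inspection technique the
# tests above rely on; DummyModel is a hypothetical stand-in, not a real model.
import inspect


class DummyModel:
    def forward(self, past_values, past_time_features, past_observed_mask):
        return past_values


signature = inspect.signature(DummyModel.forward)
# signature.parameters is an OrderedDict, so the argument order is deterministic.
arg_names = [name for name in signature.parameters if name != "self"]
assert arg_names[0] == "past_values"  # the main input is the first argument after `self`
print(arg_names)  # ['past_values', 'past_time_features', 'past_observed_mask']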
| 583 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase = 256
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["melgan"]
def __init__( self :List[Any] , __A :SpectrogramNotesEncoder , __A :SpectrogramContEncoder , __A :TaFilmDecoder , __A :DDPMScheduler , __A :OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
SCREAMING_SNAKE_CASE__ = math.log(1E-5 ) # Matches MelGAN training.
SCREAMING_SNAKE_CASE__ = 4.0 # Largest value for most examples
SCREAMING_SNAKE_CASE__ = 128
self.register_modules(
notes_encoder=__A , continuous_encoder=__A , decoder=__A , scheduler=__A , melgan=__A , )
def _snake_case ( self :str , __A :List[Any] , __A :Optional[int]=(-1.0, 1.0) , __A :Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output_range
if clip:
SCREAMING_SNAKE_CASE__ = torch.clip(__A , self.min_value , self.max_value )
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self :Dict , __A :Tuple , __A :str=(-1.0, 1.0) , __A :List[str]=False ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input_range
SCREAMING_SNAKE_CASE__ = torch.clip(__A , __A , __A ) if clip else outputs
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _snake_case ( self :Union[str, Any] , __A :Any , __A :List[Any] , __A :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = input_tokens > 0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.notes_encoder(
encoder_input_tokens=__A , encoder_inputs_mask=__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.continuous_encoder(
encoder_inputs=__A , encoder_inputs_mask=__A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self :Any , __A :int , __A :str , __A :Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = noise_time
if not torch.is_tensor(__A ):
SCREAMING_SNAKE_CASE__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__A ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE__ = self.decoder(
encodings_and_masks=__A , decoder_input_tokens=__A , decoder_noise_time=__A )
return logits
@torch.no_grad()
def __call__( self :Dict , __A :List[List[int]] , __A :Optional[torch.Generator] = None , __A :int = 100 , __A :bool = True , __A :str = "numpy" , __A :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A :int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__A )}.''' )
SCREAMING_SNAKE_CASE__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = np.zeros([1, 0, self.n_dims] , np.floataa )
SCREAMING_SNAKE_CASE__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
for i, encoder_input_tokens in enumerate(__A ):
if i == 0:
SCREAMING_SNAKE_CASE__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
SCREAMING_SNAKE_CASE__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
SCREAMING_SNAKE_CASE__ = ones
SCREAMING_SNAKE_CASE__ = self.scale_features(
__A , output_range=[-1.0, 1.0] , clip=__A )
SCREAMING_SNAKE_CASE__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__A , continuous_mask=__A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
SCREAMING_SNAKE_CASE__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE__ = self.decode(
encodings_and_masks=__A , input_tokens=__A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__A , __A , __A , generator=__A ).prev_sample
SCREAMING_SNAKE_CASE__ = self.scale_to_features(__A , input_range=[-1.0, 1.0] )
SCREAMING_SNAKE_CASE__ = mel[:1]
SCREAMING_SNAKE_CASE__ = mel.cpu().float().numpy()
SCREAMING_SNAKE_CASE__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A )
logger.info("""Generated segment""" , __A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
SCREAMING_SNAKE_CASE__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
SCREAMING_SNAKE_CASE__ = full_pred_mel
if not return_dict:
return (output,)
        return AudioPipelineOutput(audios=__A )
| 6 |
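# A standalone sketch of the min-max rescaling that scale_features performs in
# the SpectrogramDiffusionPipeline above (min_value mirrors the MelGAN log floor;
# the sample values are made up).
import math

import torch


def scale(features, min_value=math.log(1e-5), max_value=4.0, output_range=(-1.0, 1.0), clip=False):
    min_out, max_out = output_range
    if clip:
        features = torch.clip(features, min_value, max_value)
    zero_one = (features - min_value) / (max_value - min_value)  # scale to [0, 1]
    return zero_one * (max_out - min_out) + min_out  # scale to [min_out, max_out]


print(scale(torch.tensor([math.log(1e-5), 4.0])))  # endpoints map to tensor([-1., 1.])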
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
snake_case__ = logging.get_logger(__name__)
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
# Get the sagemaker specific mp parameters from smp_options variable.
a__ :str = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
a__ :str = json.loads(a )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
a__ :Dict = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
a__ :str = json.loads(a )
if not mpi_options.get("sagemaker_mpi_enabled" , a ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( _a):
lowerCamelCase_ = field(
default='' ,metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} ,)
def _snake_case ( self : List[str] ) ->int:
"""simple docstring"""
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , __A , )
@cached_property
def _snake_case ( self : List[Any] ) ->"torch.device":
"""simple docstring"""
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`" )
if self.no_cuda:
a__ :str = torch.device("cpu" )
a__ :Optional[Any] = 0
elif is_sagemaker_model_parallel_available():
a__ :Union[str, Any] = smp.local_rank()
a__ :Tuple = torch.device("cuda" , __A )
a__ :Any = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
a__ :Optional[Any] = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
a__ :Any = torch.device("cuda" , self.local_rank )
a__ :List[Any] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
a__ :Optional[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
a__ :Any = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
a__ :List[Any] = torch.device("cuda" , self.local_rank )
a__ :Union[str, Any] = 1
if device.type == "cuda":
torch.cuda.set_device(__A )
return device
@property
def _snake_case ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def _snake_case ( self : int ) ->Dict:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def _snake_case ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
return False
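# A self-contained sketch of the env-var probing pattern behind
# is_sagemaker_model_parallel_available above; the JSON payload is made up.
import json
import os


def json_env_has_field(var_name, field):
    try:
        return field in json.loads(os.getenv(var_name, "{}"))
    except json.JSONDecodeError:
        return False


os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
print(json_env_has_field("SM_HP_MP_PARAMETERS", "partitions"))  # True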
| 395 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase(self ):
A_ : Optional[Any] = logging.get_logger()
# the current default level is logging.WARNING
A_ : Optional[Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowerCAmelCase_ )
def lowerCamelCase(self ):
A_ : List[str] = logging.get_verbosity()
A_ : Dict = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
A_ : Tuple = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowerCAmelCase_ ) as cl:
logger.warning(lowerCAmelCase_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowerCAmelCase_ ) as cl:
logger.warning(lowerCAmelCase_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowerCAmelCase_ ) as cl:
logger.warning(lowerCAmelCase_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(lowerCAmelCase_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCamelCase(self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
A_ : Optional[int] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
A_ : Optional[Any] = os.getenv("""TRANSFORMERS_VERBOSITY""" , lowerCAmelCase_ )
A_ : str = logging.log_levels[env_level_str]
A_ : str = logging.get_verbosity()
self.assertEqual(
lowerCAmelCase_ , lowerCAmelCase_ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
A_ : List[str] = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCamelCase(self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
A_ : Tuple = logging.logging.getLogger()
with CaptureLogger(lowerCAmelCase_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCamelCase(self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
A_ : Tuple = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
A_ : List[Any] = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowerCAmelCase_ ) as cl:
logger.warning_advice(lowerCAmelCase_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowerCAmelCase_ ) as cl:
logger.warning_advice(lowerCAmelCase_ )
self.assertEqual(cl.out , msg + """\n""" )
def __UpperCamelCase ( ):
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
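# The tests above lean on transformers' CaptureLogger helper; a rough stdlib
# equivalent, assuming a temporary StreamHandler is enough for the job.
import io
import logging
from contextlib import contextmanager


@contextmanager
def capture_logger(logger):
    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    logger.addHandler(handler)
    try:
        yield stream
    finally:
        logger.removeHandler(handler)


demo_logger = logging.getLogger("demo")
with capture_logger(demo_logger) as captured:
    demo_logger.warning("Testing 1, 2, 3")
print(captured.getvalue())  # "Testing 1, 2, 3\n"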
| 480 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Optional[int] = """facebook/bart-large-mnli"""
_A : str = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
_A : List[str] = """text_classifier"""
_A : Optional[int] = AutoTokenizer
_A : Optional[Any] = AutoModelForSequenceClassification
_A : List[str] = ["""text""", ["""text"""]]
_A : Dict = ["""text"""]
def lowerCamelCase(self ):
super().setup()
A_ : int = self.model.config
A_ : List[str] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
A_ : List[Any] = int(lowerCAmelCase_ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : List[Any] = labels
return self.pre_processor(
[text] * len(lowerCAmelCase_ ) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def lowerCamelCase(self , lowerCAmelCase_ ):
A_ : str = outputs.logits
A_ : Optional[int] = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
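# How the tool above turns plain classification into NLI: pair the text with one
# "This example is {label}" hypothesis per label and keep the label whose pair
# scores highest on the entailment class. Logits and entailment_id are made up.
import torch

candidate_labels = ["positive", "negative"]
nli_logits = torch.tensor([[0.1, 0.2, 3.5], [0.3, 0.1, -1.0]])  # one row per (text, label) pair
entailment_id = 2  # assumption: index of the "entailment" class in this fake model
best = torch.argmax(nli_logits[:, entailment_id]).item()
print(candidate_labels[best])  # positive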
| 480 | 1 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase_ = ''
UpperCAmelCase_ = ''
UpperCAmelCase_ = ''
UpperCAmelCase_ = ''
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = tweepy.OAuthHandler(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
auth.set_access_token(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = tweepy.API(SCREAMING_SNAKE_CASE__ )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase__ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase__ = api.user_timeline(screen_name=SCREAMING_SNAKE_CASE__ , count=200 )
# save most recent tweets
alltweets.extend(SCREAMING_SNAKE_CASE__ )
# save the id of the oldest tweet less one
UpperCAmelCase__ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(SCREAMING_SNAKE_CASE__ ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase__ = api.user_timeline(
screen_name=SCREAMING_SNAKE_CASE__ , count=200 , max_id=SCREAMING_SNAKE_CASE__ )
# save most recent tweets
alltweets.extend(SCREAMING_SNAKE_CASE__ )
# update the id of the oldest tweet less one
UpperCAmelCase__ = alltweets[-1].id - 1
print(F'''...{len(SCREAMING_SNAKE_CASE__ )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase__ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , """w""" ) as f:
UpperCAmelCase__ = csv.writer(SCREAMING_SNAKE_CASE__ )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
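# The download loop above is classic max_id cursor pagination; the same idea
# without tweepy, against a fake paged API (fetch_page is hypothetical).
def fetch_page(ids, before_id, count=3):
    return [i for i in ids if i < before_id][:count]


source_ids = list(range(10, 0, -1))  # fake tweet ids, newest first
collected = []
page = fetch_page(source_ids, before_id=10**9)
while len(page) > 0:
    collected.extend(page)
    page = fetch_page(source_ids, before_id=page[-1])  # resume before the oldest id seen
print(collected)  # [10, 9, ..., 1]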
| 603 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
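# _LazyModule above defers heavy imports until first attribute access; a tiny
# PEP 562 sketch of the same idea (the mapping below is purely illustrative).
import importlib

_lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}


def __getattr__(name):  # module-level __getattr__, available since Python 3.7
    for module_name, exported_names in _lazy_structure.items():
        if name in exported_names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)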
| 603 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase : Union[str, Any] = logging.getLogger()
_lowerCAmelCase : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __snake_case ( SCREAMING_SNAKE_CASE ):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
os.makedirs(a_ ,exist_ok=a_ )
lowerCAmelCase__ = {'source': 'What is love ?', 'target': 'life'}
lowerCAmelCase__ = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase__ = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(a_ ,f'{split}.{field}' ) ,'w' ) as f:
f.write(a_ )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ = "pytorch" ):
"""simple docstring"""
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = os.path.join(a_ ,'output' )
lowerCAmelCase__ = os.path.join(a_ ,'data' )
self._create_dummy_data(data_dir=a_ )
lowerCAmelCase__ = f'\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n '.split()
if gpus > 0:
testargs.append(f'--gpus={gpus}' )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
lowerCAmelCase__ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(a_ ,env=self.get_env() )
lowerCAmelCase__ = os.path.join(a_ ,'metrics.json' )
with open(a_ ) as f:
lowerCAmelCase__ = json.load(a_ )
return result
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] ,0.2 )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] ,0.2 )
@require_torch_gpu
@require_ray
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self._run_finetune(gpus=1 ,distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] ,0.2 )
@require_torch_multi_gpu
@require_ray
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self._run_finetune(gpus=1 ,distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] ,0.2 )
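# The tests above launch a training script in a subprocess and read metrics back
# from disk; a minimal stand-in with an invented metrics payload.
import json
import subprocess
import sys
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp_dir:
    metrics_path = Path(tmp_dir) / "metrics.json"
    fake_training = f"import json; json.dump({{'test_avg_em': 0.5}}, open(r'{metrics_path}', 'w'))"
    subprocess.run([sys.executable, "-c", fake_training], check=True)
    metrics = json.loads(metrics_path.read_text())
assert metrics["test_avg_em"] >= 0.2
print(metrics)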
| 604 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = 'M-CLIP'
def __init__( self ,a_=1024 ,a_=768 ,**a_ ):
"""simple docstring"""
lowerCAmelCase__ = transformerDimSize
lowerCAmelCase__ = imageDimSize
super().__init__(**a_ )
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = MCLIPConfig
def __init__( self ,a_ ,*a_ ,**a_ ):
"""simple docstring"""
super().__init__(a_ ,*a_ ,**a_ )
lowerCAmelCase__ = XLMRobertaModel(a_ )
lowerCAmelCase__ = torch.nn.Linear(
in_features=config.transformerDimensions ,out_features=config.numDims )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.transformer(input_ids=a_ ,attention_mask=a_ )[0]
lowerCAmelCase__ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(a_ ), embs
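# The forward pass above mean-pools token embeddings while ignoring padded
# positions; a toy demo of that masking arithmetic.
import torch

token_embs = torch.arange(12.0).reshape(1, 4, 3)  # (batch, seq_len, dim)
attention_mask = torch.tensor([[1, 1, 0, 0]])  # last two tokens are padding
pooled = (token_embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled)  # mean of the first two token vectors only: tensor([[1.5000, 2.5000, 3.5000]])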
| 604 | 1 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class _lowerCAmelCase ( lowerCamelCase ):
@require_torch
def _a ( self ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(a_ )
BertModel.from_pretrained(a_ )
BertTokenizer.from_pretrained(a_ )
pipeline(task="fill-mask" , model=a_ )
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_UpperCAmelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _a ( self ) -> Any:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(a_ )
BertModel.from_pretrained(a_ )
BertTokenizer.from_pretrained(a_ )
pipeline(task="fill-mask" , model=a_ )
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _a ( self ) -> int:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = "\nfrom transformers import pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = "1"
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _a ( self ) -> Tuple:
_UpperCAmelCase = "\nfrom transformers import AutoModel\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
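# The offline tests work by monkey-patching socket.socket before any network
# call happens; a self-contained demo of that trick.
import socket

real_socket = socket.socket


def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")


socket.socket = offline_socket
try:
    socket.socket()
except RuntimeError as error:
    print(error)
finally:
    socket.socket = real_socket  # always restore the real implementation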
| 657 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
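# A simplified sketch of the alignment rule those tests exercise: derive
# whichever of out_features / out_indices is missing (validation omitted here).
def align_outputs(stage_names, out_features=None, out_indices=None):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]  # default to the last stage
    if out_indices is None:
        return out_features, [stage_names.index(feature) for feature in out_features]
    if out_features is None:
        return [stage_names[idx] for idx in out_indices], list(out_indices)
    return out_features, list(out_indices)


print(align_outputs(["a", "b", "c"]))  # (['c'], [2])
print(align_outputs(["a", "b", "c"], out_indices=(-3, -1)))  # (['a', 'c'], [-3, -1])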
| 657 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : int =logging.get_logger(__name__)
def A__ ( __A ):
'''simple docstring'''
# initialize config
if "resnet-50" in model_name:
_lowerCamelCase : List[Any] = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
_lowerCamelCase : str = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
_lowerCamelCase : Optional[Any] = DetrConfig(use_timm_backbone=__A , backbone_config=__A )
# set label attributes
_lowerCamelCase : Dict = """panoptic""" in model_name
if is_panoptic:
_lowerCamelCase : Any = 250
else:
_lowerCamelCase : List[str] = 91
_lowerCamelCase : Optional[int] = """huggingface/label-files"""
_lowerCamelCase : int = """coco-detection-id2label.json"""
_lowerCamelCase : int = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
_lowerCamelCase : Any = {int(__A ): v for k, v in idalabel.items()}
_lowerCamelCase : str = idalabel
_lowerCamelCase : Dict = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : List[str] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def A__ ( __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = state_dict.pop(__A )
_lowerCamelCase : Dict = val
def A__ ( __A , __A=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = """"""
if is_panoptic:
_lowerCamelCase : Tuple = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCamelCase : List[str] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCamelCase : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[:256, :]
_lowerCamelCase : int = in_proj_bias[:256]
_lowerCamelCase : Optional[Any] = in_proj_weight[256:512, :]
_lowerCamelCase : Tuple = in_proj_bias[256:512]
_lowerCamelCase : List[Any] = in_proj_weight[-256:, :]
_lowerCamelCase : Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowerCamelCase : Tuple = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCamelCase : Tuple = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[:256, :]
_lowerCamelCase : List[str] = in_proj_bias[:256]
_lowerCamelCase : Dict = in_proj_weight[256:512, :]
_lowerCamelCase : Union[str, Any] = in_proj_bias[256:512]
_lowerCamelCase : Optional[Any] = in_proj_weight[-256:, :]
_lowerCamelCase : Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowerCamelCase : Union[str, Any] = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
_lowerCamelCase : Any = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowerCamelCase : int = in_proj_weight_cross_attn[:256, :]
_lowerCamelCase : Any = in_proj_bias_cross_attn[:256]
_lowerCamelCase : List[str] = in_proj_weight_cross_attn[256:512, :]
_lowerCamelCase : Any = in_proj_bias_cross_attn[256:512]
_lowerCamelCase : Any = in_proj_weight_cross_attn[-256:, :]
_lowerCamelCase : Optional[int] = in_proj_bias_cross_attn[-256:]
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCamelCase : str = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def A__ ( __A , __A=None , __A=False ):
'''simple docstring'''
_lowerCamelCase : List[str] = get_detr_config(__A )
# load original model from torch hub
_lowerCamelCase : Any = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(F"""Converting model {model_name}...""" )
_lowerCamelCase : Optional[int] = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=__A ).eval()
_lowerCamelCase : Optional[Any] = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(__A ):
if is_panoptic:
_lowerCamelCase : Optional[int] = """detr.""" + src
rename_key(__A , __A , __A )
# query, key and value matrices need special treatment
read_in_q_k_v(__A , is_panoptic=__A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase : str = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_lowerCamelCase : Tuple = state_dict.pop(__A )
_lowerCamelCase : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCamelCase : int = state_dict.pop(__A )
_lowerCamelCase : Union[str, Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_lowerCamelCase : List[str] = state_dict.pop(__A )
_lowerCamelCase : Optional[Any] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_lowerCamelCase : Any = state_dict.pop(__A )
_lowerCamelCase : Any = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase : List[str] = DetrForSegmentation(__A ) if is_panoptic else DetrForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
# verify our conversion on an image
_lowerCamelCase : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
_lowerCamelCase : List[Any] = DetrImageProcessor(format=__A )
_lowerCamelCase : Optional[int] = processor(images=prepare_img() , return_tensors="""pt""" )
_lowerCamelCase : Optional[int] = encoding["""pixel_values"""]
_lowerCamelCase : List[Any] = detr(__A )
_lowerCamelCase : Tuple = model(__A )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
lowerCAmelCase : int =parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
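# At its core, the conversion above pops keys out of a state dict and re-inserts
# them under new names; a toy demo of that rename pattern with a fake checkpoint.
import torch

toy_state_dict = {"backbone.0.body.conv1.weight": torch.zeros(2, 2)}
toy_renames = [
    (
        "backbone.0.body.conv1.weight",
        "backbone.conv_encoder.model.embedder.embedder.convolution.weight",
    )
]
for src, dest in toy_renames:
    toy_state_dict[dest] = toy_state_dict.pop(src)
print(list(toy_state_dict))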
| 719 |
import math


def is_prime(number: int) -> bool:
    """Trial division: handle the 2/3 fast path, then test odd divisors up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Walk from factor * value to the nearest prime (downward when desc=True is passed)."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
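# Quick sanity checks for the helpers above, assuming the repaired signatures.
print(is_prime(29))  # True
print(next_prime(14))  # 17 (walks upward from 14 to the nearest prime)
print(next_prime(14, desc=True))  # 13 (walks downward when desc=True)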
| 15 | 0 |
def remove_digit(num: int) -> int:
    """Return the largest value obtainable by removing exactly one digit from ``num``."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(num))
    # one digit-list per position; drop a different digit from each copy
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
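# Example: from 132942, dropping the leading 1 gives the largest five-digit result.
print(remove_digit(132942))  # 32942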
| 377 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output class for a scheduler's step function: holds the previous sample x_{t-1}."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functionality for the Flax schedulers
    (config loading/saving and compatible-class lookup)."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    # Append singleton dimensions on the right so x broadcasts against `shape`.
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
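
# Hedged demo (added, comment-only since this module uses relative imports):
# the helpers above implement the closed-form forward process
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# e.g. with a hand-built linear schedule:
#     betas = jnp.linspace(1e-4, 0.02, 1000, dtype=jnp.float32)
#     alphas = 1.0 - betas
#     state = CommonSchedulerState(alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0))
#     x_t = add_noise_common(state, x0, noise, jnp.array([10, 500]))  # x0, noise: (2, C, H, W)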
| 496 | 0 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
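# Non-interactive example (added): the 3-vertex graph from the comments above
# can be fed in directly, reproducing the expected matrix:
#   INF = float("inf")
#   graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#   dist, _ = floyd_warshall(graph, 3)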
| 720 |
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
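# Hedged usage note (added): mirroring similar repo utilities, this script is
# typically run from the repository root as
#   python utils/check_task_guides.py                      # check only
#   python utils/check_task_guides.py --fix_and_overwrite  # update the guides in place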
| 384 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
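# Hedged note (added): these tests are collected by pytest, e.g. something like
#   python -m pytest -k "zero_shot_audio_classification" tests/
# (the exact test-file path depends on the repository layout).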
| 608 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
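
# Hedged usage sketch (added): the three configs compose as
#   text_config = AlignTextConfig()
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()["model_type"]   # -> "align"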
| 608 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
_lowerCAmelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
        expected_words = _lowerCAmelCase  # capture the words list before the next assignment reuses the same name
_lowerCAmelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        expected_boxes = _lowerCAmelCase  # capture the boxes list under a readable name
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
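
# Hedged usage sketch (added): outside the test suite, the processor is used as
#   processor = LayoutLMv3ImageProcessor()        # apply_ocr=True requires pytesseract
#   encoding = processor(image, return_tensors="pt")
#   encoding.pixel_values.shape                   # -> (1, 3, 224, 224)
#   encoding.words, encoding.boxes                # OCR'd words and their boxes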
| 715 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: 0-weight edges go to the front of the deque, 1-weight to the back.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
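
if __name__ == "__main__":
    # Demo (added): a 0-weight edge is traversed "for free" by the 0-1 BFS above.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    assert g.get_shortest_path(0, 2) == 1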
| 585 | 0 |
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
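
if __name__ == "__main__":
    # Worked example (added): the classic two-state "Healthy/Fever" HMM.
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))  # ['Healthy', 'Healthy', 'Fever']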
| 48 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
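
# Hedged usage sketch (added): within transformers,
#   config = SwitchTransformersConfig(num_experts=8, expert_capacity=64)
#   config.dense_act_fn                                                     # -> "relu"
#   SwitchTransformersConfig(feed_forward_proj="gated-gelu").dense_act_fn   # -> "gelu_new"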
| 634 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class StandardImageProcessor(BaseImageProcessor):
    r"""
    A resize / center-crop / rescale / normalize image processor. NOTE: the class
    name here is a neutral reconstruction; the original name was lost in the source.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
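
# Hedged usage sketch (added; "StandardImageProcessor" is the name chosen above):
#   processor = StandardImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape   # -> (1, 3, 224, 224) after resize + center crop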
| 702 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is"
        ' that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen'
        " the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors="pt" )
UpperCamelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCamelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors="pt" )
UpperCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors="pt" )
UpperCamelCase_ = targets["input_ids"]
UpperCamelCase_ = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 25_0004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_0001,
} , )
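

# --- Illustrative sketch added for clarity (not part of the original test file) ---
# A minimal end-to-end use of the translation encoding behaviour asserted above.
# The checkpoint name comes from the test class; downloading it needs network
# access, so this is only defined here and never invoked.
def _example_mbart_translation_encoding():
    from transformers import MBartTokenizer

    tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tokenizer(
        ["UN Chief Says There Is No Military Solution in Syria"],
        text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
        padding=True,
        return_tensors="pt",
    )
    # Source ids end with [eos, en_XX] and labels end with [eos, ro_RO], which is
    # exactly what test_batch_fairseq_parity checks.
    return batch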
| 504 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
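

# --- Illustrative sketch added for clarity (not part of the original test file) ---
# How the backbone variant tested above is typically consumed: multi-scale feature
# maps for a batch of images. Assumption: the public "microsoft/focalnet-tiny"
# checkpoint can be loaded directly into the backbone class with explicit
# out_features. Requires network access, so nothing here calls it.
def _example_focalnet_feature_maps():
    import torch
    from transformers import FocalNetBackbone

    backbone = FocalNetBackbone.from_pretrained("microsoft/focalnet-tiny", out_features=["stage1", "stage2"])
    pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for AutoImageProcessor output
    with torch.no_grad():
        outputs = backbone(pixel_values)
    # One (batch, channels, height, width) tensor per requested stage.
    return [fm.shape for fm in outputs.feature_maps]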
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user has already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
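

# --- Illustrative sketch added for clarity (not part of the original module) ---
# The sentinel tokens handled by _convert_token_to_id/_convert_id_to_token above are
# what T5's span-corruption format uses. A rough round-trip, assuming the standard
# "t5-small" vocabulary (vocab_size 32100 with 100 extra ids); defined only, never
# called here, since it needs network access.
def _example_t5_sentinel_tokens():
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park").input_ids
    # <extra_id_0> maps to vocab_size - 1, <extra_id_1> to vocab_size - 2, and so on.
    assert tokenizer.convert_tokens_to_ids("<extra_id_0>") == tokenizer.vocab_size - 1
    return ids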
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe update them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
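

# --- Illustrative sketch added for clarity (not part of the original script) ---
# What create_dummy_object emits for the three kinds of names. Pure string
# templating, so this is safe to run anywhere; defined only, never called here.
def _example_dummy_objects():
    backends = '["torch"]'
    print(create_dummy_object("CONSTANT_NAME", backends))  # -> CONSTANT_NAME = None
    print(create_dummy_object("function_name", backends))  # -> def function_name(*args, **kwargs): ...
    print(create_dummy_object("ClassName", backends))  # -> class ClassName(metaclass=DummyObject): ...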
| 511 |
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series 1, 1/2, 1/3, ..., 1/n as a list of strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
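
# --- Illustrative addition ---
# The function above returns the individual terms as strings; the numeric partial
# sum H_n = 1 + 1/2 + ... + 1/n is often what is actually wanted:
def harmonic_sum(n: int) -> float:
    return sum(1 / k for k in range(1, n + 1))


# e.g. harmonic_sum(4) == 1 + 0.5 + 0.3333... + 0.25 ~= 2.0833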
| 511 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
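
# --- Illustrative usage note added for clarity (not part of the original script) ---
# A typical single-GPU invocation, assuming the default cifar10 dataset and an
# output directory of your choice (all flag names come from the dataclasses above):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss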
| 235 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
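

# --- Illustrative sketch added for clarity (not part of the original script) ---
# Generating audio with a converted checkpoint. Assumption: the repo follows the
# public "facebook/musicgen-small" layout produced by this conversion. Defined
# only, never called here, since it needs network access and is slow.
def _example_generate_with_converted_model():
    from transformers import AutoProcessor, MusicgenForConditionalGeneration

    processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
    model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
    inputs = processor(text=["lo-fi hip hop beat with mellow piano"], padding=True, return_tensors="pt")
    audio_values = model.generate(**inputs, max_new_tokens=256)  # roughly 5 seconds at 32 kHz
    return audio_values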
| 269 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
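

# --- Illustrative sketch added for clarity (not part of the original module) ---
# Shape walk-through for the pan cameras built above: 20 poses orbiting the
# origin, each yielding one (origin, direction) ray pair per pixel. Defined only,
# never called here.
def _example_camera_ray_shapes():
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays
    # batch_size 1; 20 views x 64 x 64 pixels; 2 for (origin, direction); 3 for xyz
    assert rays.shape == (1, 20 * 64 * 64, 2, 3)
    return rays.shape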
| 705 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    '''simple docstring'''
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ] )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
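# For example (added for illustration), a BLIP vision-encoder key is mapped as:
#   rename_key("visual_encoder.blocks.0.attn.qkv.weight")
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"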
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    '''simple docstring'''
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35, ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
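# Illustrative (added) invocation; the script filename and output path are placeholders:
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-dump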
| 150 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    '''simple docstring'''
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    '''simple docstring'''
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
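# Expected output of the demo above: the two subsets of [3, 34, 4, 12, 5, 2]
# that sum to 9, printed on one line as: [3, 4, 2] [4, 5]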
| 513 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """simple docstring"""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    '''simple docstring'''
    script_directory: str = os.path.abspath(os.path.dirname(__file__))
    network_file_path: str = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]

    with open(network_file_path) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge_a in range(1, len(adjaceny_matrix)):
        for edge_b in range(edge_a):
            if adjaceny_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjaceny_matrix[edge_a][edge_b])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f'{solution() = }')
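    # Illustrative (added) sanity check on a triangle graph: the MST keeps the two
    # cheapest edges, so the saving equals the weight of the heaviest edge (5).
    tiny = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 5})
    assert sum(tiny.edges.values()) - sum(tiny.prims_algorithm().edges.values()) == 5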
| 513 | 1 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCAmelCase_ = get_logger(__name__)
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__ = None ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = (
os.path.join(lowerCamelCase__, config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
UpperCamelCase__ : Tuple = Extractor
def UpperCamelCase__ ( self, __magic_name__ ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
UpperCamelCase__ : Dict = os.path.abspath(lowerCamelCase__ )
return os.path.join(self.extract_dir, hash_url_to_filename(lowerCamelCase__ ) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(lowerCamelCase__ ) and not (os.path.isdir(lowerCamelCase__ ) and os.listdir(lowerCamelCase__ ))
)
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = False ) -> str:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.extractor.infer_extractor_format(lowerCamelCase__ )
if not extractor_format:
return input_path
UpperCamelCase__ : List[Any] = self._get_output_path(lowerCamelCase__ )
if self._do_extract(lowerCamelCase__, lowerCamelCase__ ):
self.extractor.extract(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
return output_path
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls, __magic_name__, **__magic_name__ ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
...
class lowercase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
a : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> Any:
"""simple docstring"""
with open(lowerCamelCase__, '''rb''' ) as f:
return f.read(lowerCamelCase__ )
@classmethod
def UpperCamelCase__ ( cls, __magic_name__, __magic_name__ = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
UpperCamelCase__ : Dict = max(len(lowerCamelCase__ ) for cls_magic_number in cls.magic_numbers )
try:
UpperCamelCase__ : Dict = cls.read_magic_number(lowerCamelCase__, lowerCamelCase__ )
except OSError:
return False
return any(magic_number.startswith(lowerCamelCase__ ) for cls_magic_number in cls.magic_numbers )
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
def UpperCamelCase__ ( cls, __magic_name__, **__magic_name__ ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(lowerCamelCase__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(lowerCamelCase__ ) )
def badpath(__magic_name__, __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCamelCase__, lowerCamelCase__ ) ).startswith(lowerCamelCase__ )
def badlink(__magic_name__, __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
UpperCamelCase__ : List[Any] = resolved(os.path.join(lowerCamelCase__, os.path.dirname(info.name ) ) )
return badpath(info.linkname, base=lowerCamelCase__ )
UpperCamelCase__ : int = resolved(lowerCamelCase__ )
for finfo in members:
if badpath(finfo.name, lowerCamelCase__ ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(lowerCamelCase__, lowerCamelCase__ ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(lowerCamelCase__, lowerCamelCase__ ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ )
UpperCamelCase__ : Any = tarfile.open(lowerCamelCase__ )
tar_file.extractall(lowerCamelCase__, members=TarExtractor.safemembers(lowerCamelCase__, lowerCamelCase__ ) )
tar_file.close()
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
a : Any = [b"\x1F\x8B"]
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
with gzip.open(lowerCamelCase__, '''rb''' ) as gzip_file:
with open(lowerCamelCase__, '''wb''' ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__, lowerCamelCase__ )
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
a : List[Any] = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls, __magic_name__, __magic_name__ = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(lowerCamelCase__, magic_number=lowerCamelCase__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCamelCase__, '''rb''' ) as fp:
UpperCamelCase__ : List[Any] = _EndRecData(lowerCamelCase__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
UpperCamelCase__ : Tuple = fp.read(lowerCamelCase__ ) # CD is where we expect it to be
if len(lowerCamelCase__ ) == sizeCentralDir:
UpperCamelCase__ : List[str] = struct.unpack(lowerCamelCase__, lowerCamelCase__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ )
with zipfile.ZipFile(lowerCamelCase__, '''r''' ) as zip_file:
zip_file.extractall(lowerCamelCase__ )
zip_file.close()
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
a : List[str] = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
with lzma.open(lowerCamelCase__ ) as compressed_file:
with open(lowerCamelCase__, '''wb''' ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__, lowerCamelCase__ )
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
a : str = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ )
UpperCamelCase__ : int = rarfile.RarFile(lowerCamelCase__ )
rf.extractall(lowerCamelCase__ )
rf.close()
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
a : Dict = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
UpperCamelCase__ : Tuple = zstd.ZstdDecompressor()
with open(lowerCamelCase__, '''rb''' ) as ifh, open(lowerCamelCase__, '''wb''' ) as ofh:
dctx.copy_stream(lowerCamelCase__, lowerCamelCase__ )
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
a : str = [b"\x42\x5A\x68"]
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
with bza.open(lowerCamelCase__, '''rb''' ) as compressed_file:
with open(lowerCamelCase__, '''wb''' ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__, lowerCamelCase__ )
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
a : List[str] = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import pyazr
os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ )
with pyazr.SevenZipFile(lowerCamelCase__, '''r''' ) as archive:
archive.extractall(lowerCamelCase__ )
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
a : List[Any] = [b"\x04\x22\x4D\x18"]
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lza.frame
with lza.frame.open(lowerCamelCase__, '''rb''' ) as compressed_file:
with open(lowerCamelCase__, '''wb''' ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__, lowerCamelCase__ )
class lowercase__ :
'''simple docstring'''
a : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ) -> Dict:
"""simple docstring"""
return max(
len(lowerCamelCase__ )
for extractor in cls.extractors.values()
if issubclass(lowerCamelCase__, lowerCamelCase__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__, __magic_name__ ) -> Any:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCamelCase__, magic_number_length=lowerCamelCase__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls, __magic_name__, __magic_name__ = False ) -> bool:
"""simple docstring"""
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''', category=lowerCamelCase__, )
UpperCamelCase__ : Tuple = cls.infer_extractor_format(lowerCamelCase__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls, __magic_name__ ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = cls._get_magic_number_max_length()
UpperCamelCase__ : Optional[Any] = cls._read_magic_number(lowerCamelCase__, lowerCamelCase__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCamelCase__, magic_number=lowerCamelCase__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls, __magic_name__, __magic_name__, __magic_name__ = None, __magic_name__ = "deprecated", ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(lowerCamelCase__ ), exist_ok=lowerCamelCase__ )
# Prevent parallel extractions
UpperCamelCase__ : List[str] = str(Path(lowerCamelCase__ ).with_suffix('''.lock''' ) )
with FileLock(lowerCamelCase__ ):
shutil.rmtree(lowerCamelCase__, ignore_errors=lowerCamelCase__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCamelCase__, lowerCamelCase__ ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''', category=lowerCamelCase__, )
UpperCamelCase__ : Optional[Any] = extractor if extractor != "deprecated" else extractor_format
else:
UpperCamelCase__ : List[str] = cls.extractors[extractor_format]
return extractor.extract(lowerCamelCase__, lowerCamelCase__ )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''', category=lowerCamelCase__, )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCamelCase__ ):
return extractor.extract(lowerCamelCase__, lowerCamelCase__ )
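# Illustrative (added) standalone sketch of the magic-number dispatch idea the
# extractors above implement; the names below are hypothetical and stdlib-only.
if __name__ == "__main__":
    _SIGNATURES = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\x42\x5a\x68": "bz2"}

    def sniff_format(path, max_len=4):
        # Read the first few bytes and match them against known archive signatures.
        with open(path, "rb") as f:
            head = f.read(max_len)
        return next((name for magic, name in _SIGNATURES.items() if head.startswith(magic)), None)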
| 718 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    '''simple docstring'''

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        """simple docstring"""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        """simple docstring"""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        """simple docstring"""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """simple docstring"""
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True, ) -> Union[SdeVeOutput, Tuple]:
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True, ) -> Union[SchedulerOutput, Tuple]:
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps, ) -> torch.FloatTensor:
        """simple docstring"""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        """simple docstring"""
        return self.config.num_train_timesteps
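# Illustrative (added) predictor-corrector loop showing how the scheduler above is
# typically driven; the score network is a placeholder, not part of this file.
if __name__ == "__main__":
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(10)
    scheduler.set_sigmas(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    model = lambda x, t: -x  # stand-in score (score of a standard normal)
    for t in scheduler.timesteps:
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(model(sample, t), sample).prev_sample
        sample = scheduler.step_pred(model(sample, t), t, sample).prev_sample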
| 369 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        dropout_rate=0.5,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
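# Illustrative (added) usage: the defaults above describe EfficientNet-B7; smaller
# variants are obtained by shrinking the scaling coefficients (values hypothetical):
# config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0, image_size=224)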
| 681 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
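# Illustrative (added) usage; the dims below match the Gym Hopper environment the
# default checkpoint targets (11-dim observations, 3-dim actions):
# config = DecisionTransformerConfig(state_dim=11, act_dim=3)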
| 681 | 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
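# Illustrative (added) usage of the wrapper below: it behaves like tqdm.auto.tqdm,
# except that by default only the main process of a distributed run renders the bar:
#   for batch in tqdm(dataloader): ...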
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 302 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    """simple docstring"""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
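# Illustrative (added) standalone sketch of the two-stage update implemented by the
# scheduler below: an Euler predictor followed by a trapezoidal (Heun) corrector.
# `denoise` is a placeholder for a model predicting x0; assumes sigma_next > 0.
def _heun_step_sketch(x, sigma, sigma_next, denoise):
    d = (x - denoise(x, sigma)) / sigma  # first-order derivative at sigma
    x_pred = x + d * (sigma_next - sigma)  # Euler predictor
    d_next = (x_pred - denoise(x_pred, sigma_next)) / sigma_next
    return x + 0.5 * (d + d_next) * (sigma_next - sigma)  # Heun corrector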
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : List[Any] = [e.name for e in KarrasDiffusionSchedulers]
lowercase_ : List[Any] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.0_00_85 , snake_case_ = 0.0_12 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = False , snake_case_ = False , snake_case_ = 1.0 , snake_case_ = "linspace" , snake_case_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
A_ : Optional[int] = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
A_ : List[Any] = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A_ : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A_ : List[str] = betas_for_alpha_bar(snake_case_ , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A_ : Union[str, Any] = betas_for_alpha_bar(snake_case_ , alpha_transform_type='exp' )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
A_ : Dict = 1.0 - self.betas
A_ : Any = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
A_ : List[Any] = use_karras_sigmas
def lowerCamelCase_ ( self , snake_case_ , snake_case_=None ):
"""simple docstring"""
if schedule_timesteps is None:
A_ : List[str] = self.timesteps
A_ : Any = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A_ : Tuple = 1 if len(snake_case_ ) > 1 else 0
else:
A_ : Optional[int] = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
A_ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , ):
"""simple docstring"""
A_ : Optional[int] = self.index_for_timestep(snake_case_ )
A_ : Union[str, Any] = self.sigmas[step_index]
A_ : List[str] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : Tuple = num_inference_steps
A_ : Any = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A_ : Dict = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A_ : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A_ : str = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A_ : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A_ : Optional[Any] = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
A_ : List[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A_ : int = np.log(snake_case_ )
A_ : str = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
if self.config.use_karras_sigmas:
A_ : Union[str, Any] = self._convert_to_karras(in_sigmas=snake_case_ , num_inference_steps=self.num_inference_steps )
A_ : Dict = np.array([self._sigma_to_t(snake_case_ , snake_case_ ) for sigma in sigmas] )
A_ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A_ : List[Any] = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
A_ : str = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A_ : int = torch.from_numpy(snake_case_ )
A_ : Dict = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(snake_case_ ).startswith('mps' ):
# mps does not support float64
A_ : int = timesteps.to(snake_case_ , dtype=torch.floataa )
else:
A_ : Tuple = timesteps.to(device=snake_case_ )
# empty dt and derivative
A_ : Optional[int] = None
A_ : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A_ : Any = defaultdict(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[str] = np.log(snake_case_ )
# get distribution
A_ : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A_ : Optional[Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A_ : Tuple = low_idx + 1
A_ : Dict = log_sigmas[low_idx]
A_ : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
A_ : Any = (low - log_sigma) / (low - high)
A_ : Optional[int] = np.clip(snake_case_ , 0 , 1 )
# transform interpolation to time range
A_ : str = (1 - w) * low_idx + w * high_idx
A_ : Optional[int] = t.reshape(sigma.shape )
return t
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : float = in_sigmas[-1].item()
A_ : float = in_sigmas[0].item()
A_ : str = 7.0 # 7.0 is the value used in the paper
A_ : str = np.linspace(0 , 1 , snake_case_ )
A_ : List[str] = sigma_min ** (1 / rho)
A_ : int = sigma_max ** (1 / rho)
A_ : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.dt is None
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ):
"""simple docstring"""
A_ : Dict = self.index_for_timestep(snake_case_ )
# advance index counter by 1
A_ : Tuple = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A_ : Tuple = self.sigmas[step_index]
A_ : List[str] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A_ : str = self.sigmas[step_index - 1]
A_ : Optional[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A_ : Tuple = 0
A_ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A_ : str = sigma_hat if self.state_in_first_order else sigma_next
A_ : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A_ : Tuple = sigma_hat if self.state_in_first_order else sigma_next
A_ : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A_ : Optional[int] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
A_ : Union[str, Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A_ : str = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A_ : str = sigma_next - sigma_hat
# store for 2nd order step
A_ : Optional[Any] = derivative
A_ : Union[str, Any] = dt
A_ : Optional[Any] = sample
else:
# 2. 2nd order / Heun's method
A_ : List[Any] = (sample - pred_original_sample) / sigma_next
A_ : Optional[Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A_ : List[Any] = self.dt
A_ : Optional[Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A_ : List[str] = None
A_ : Tuple = None
A_ : str = None
A_ : Optional[int] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , ):
"""simple docstring"""
A_ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
A_ : str = self.timesteps.to(original_samples.device , dtype=torch.floataa )
A_ : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
A_ : List[str] = self.timesteps.to(original_samples.device )
A_ : int = timesteps.to(original_samples.device )
A_ : Union[str, Any] = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
A_ : int = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A_ : List[Any] = sigma.unsqueeze(-1 )
A_ : List[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
"""simple docstring"""
        return self.config.num_train_timesteps
| 302 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 294 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, output_type="pil", return_dict=True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
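# Illustrative (added) usage; "CompVis/ldm-celebahq-256" is a public unconditional
# latent-diffusion checkpoint compatible with this pipeline:
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(num_inference_steps=50).images[0]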
| 294 | 1 |
"""simple docstring"""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'''{other} (type {type(other)}) cannot be compared to version.''')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """simple docstring"""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'''Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.''')
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])


def _version_tuple_to_str(version_tuple):
    """simple docstring"""
    return ".".join(str(v) for v in version_tuple)
| 715 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 536 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)

        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
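    # The equivalence check above, as a minimal standalone sketch (the checkpoint
    # name is taken from this test; the snippet itself is not part of the original file):
    #
    #   backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
    #   outputs = backbone(pixel_values)      # BackboneOutput
    #   feature_maps = outputs.feature_maps   # one tensor per requested stage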
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 48 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
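    # A minimal sanity check (an illustrative addition, not part of the original
    # interactive script): a triangle graph where the MST must pick the two
    # cheapest edges (0, 1) and (1, 2).
    #
    #   example_graph = defaultdict(list)
    #   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    #       example_graph[u].append([v, w])
    #       example_graph[v].append([u, w])
    #   assert prisms_algorithm(example_graph) == [(0, 1), (1, 2)]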
| 48 | 1 |
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y) with y(x0) = y0 on
    [x0, x_end] using the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
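# Quick sanity check (illustrative; not part of the original file): for the ODE
# dy/dx = y with y(0) = 1, the explicit Euler estimate at x = 1 approaches
# e ~ 2.71828 as step_size shrinks.
#
#   approx = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
#   print(approx[-1])  # ~ 2.7169 with step_size 0.001; the error shrinks with the step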
| 354 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = model_class(lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowercase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear))
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[int] = model_class(lowerCamelCase)
_lowercase : Optional[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Union[str, Any] = [*signature.parameters.keys()]
_lowercase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : List[str] = True
# in YOLOS, the seq_len is different
_lowercase : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_lowercase : Optional[Any] = True
_lowercase : str = False
_lowercase : Tuple = True
_lowercase : Tuple = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : int = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : Optional[int] = outputs.attentions
self.assertEqual(len(lowerCamelCase), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase : int = True
_lowercase : Tuple = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Any = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : str = outputs.attentions
self.assertEqual(len(lowerCamelCase), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
_lowercase : Optional[Any] = len(lowerCamelCase)
# Check attention is always last and order is fine
_lowercase : List[str] = True
_lowercase : Union[str, Any] = True
_lowercase : Any = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Dict = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : Dict = 1
self.assertEqual(out_len + added_hidden_states, len(lowerCamelCase))
_lowercase : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase):
_lowercase : Tuple = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : int = outputs.hidden_states
_lowercase : Dict = getattr(
self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowerCamelCase), lowerCamelCase)
# YOLOS has a different seq_length
_lowercase : List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Any = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[Any] = YolosModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[str] = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(lowerCamelCase)
_lowercase : int = self.default_image_processor
_lowercase : List[Any] = prepare_img()
_lowercase : str = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : str = model(inputs.pixel_values)
# verify outputs
_lowercase : Optional[int] = torch.Size((1, 1_00, 92))
self.assertEqual(outputs.logits.shape, lowerCamelCase)
_lowercase : Tuple = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]], device=lowerCamelCase, )
_lowercase : Dict = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]], device=lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase, atol=1E-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], lowerCamelCase, atol=1E-4))
# verify postprocessing
_lowercase : str = image_processor.post_process_object_detection(
lowerCamelCase, threshold=0.3, target_sizes=[image.size[::-1]])[0]
_lowercase : Union[str, Any] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1]).to(lowerCamelCase)
_lowercase : Optional[Any] = [75, 75, 17, 63, 17]
_lowercase : Union[str, Any] = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5]).to(lowerCamelCase)
self.assertEqual(len(results['scores']), 5)
self.assertTrue(torch.allclose(results['scores'], lowerCamelCase, atol=1E-4))
self.assertSequenceEqual(results['labels'].tolist(), lowerCamelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :], lowerCamelCase))
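    # The inference and post-processing exercised above, as a minimal standalone
    # sketch (model and processor ids are taken from this test; the snippet is
    # not part of the original file):
    #
    #   processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
    #   model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
    #   inputs = processor(images=image, return_tensors="pt")
    #   outputs = model(**inputs)
    #   results = processor.post_process_object_detection(
    #       outputs, threshold=0.3, target_sizes=[image.size[::-1]]
    #   )[0]  # dict with "scores", "labels" and "boxes"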
| 354 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'])
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase, model_name='facebook/s2t-small-mustc-en-de-st', revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad')
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
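    # For reference, the behaviour exercised above as a minimal sketch (the
    # checkpoint name is taken from this test; the snippet is not part of the
    # original file):
    #
    #   tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
    #   tokenizer.tgt_lang = "fr"
    #   ids = tokenizer("C'est trop cool").input_ids  # ids[0] is the fr language code (5)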
| 254 |
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_SCREAMING_SNAKE_CASE : List[str] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_SCREAMING_SNAKE_CASE : Optional[Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE : int = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
_SCREAMING_SNAKE_CASE : List[str] = f"""layers_{str(__lowerCamelCase )}"""
# Self-Attention
_SCREAMING_SNAKE_CASE : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
_SCREAMING_SNAKE_CASE : List[str] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
_SCREAMING_SNAKE_CASE : Any = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
_SCREAMING_SNAKE_CASE : int = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
_SCREAMING_SNAKE_CASE : Tuple = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
_SCREAMING_SNAKE_CASE : Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_SCREAMING_SNAKE_CASE : Dict = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
_SCREAMING_SNAKE_CASE : int = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_SCREAMING_SNAKE_CASE : Dict = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_SCREAMING_SNAKE_CASE : List[str] = flax_model.params["encoder"]["block"][str(__lowerCamelCase )]["layer"]
_SCREAMING_SNAKE_CASE : Tuple = tax_attention_key
_SCREAMING_SNAKE_CASE : List[Any] = tax_attention_out
_SCREAMING_SNAKE_CASE : Tuple = tax_attention_query
_SCREAMING_SNAKE_CASE : List[str] = tax_attention_value
_SCREAMING_SNAKE_CASE : Any = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE : str = tax_global_layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE : int = tax_mlp_wi_a
_SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wi_a
else:
_SCREAMING_SNAKE_CASE : List[str] = tax_mlp_wi
_SCREAMING_SNAKE_CASE : int = tax_mlp_wo
_SCREAMING_SNAKE_CASE : Optional[int] = tax_mlp_layer_norm
_SCREAMING_SNAKE_CASE : Tuple = flax_model_encoder_layer_block
# Only for layer 0:
_SCREAMING_SNAKE_CASE : Optional[Any] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_SCREAMING_SNAKE_CASE : str = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE : Dict = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_SCREAMING_SNAKE_CASE : Tuple = tax_encoder_global_rel_embedding
# Assigning
_SCREAMING_SNAKE_CASE : List[str] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_SCREAMING_SNAKE_CASE : Dict = f"""layers_{str(__lowerCamelCase )}"""
# Self-Attention
_SCREAMING_SNAKE_CASE : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
_SCREAMING_SNAKE_CASE : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
_SCREAMING_SNAKE_CASE : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
_SCREAMING_SNAKE_CASE : Dict = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
_SCREAMING_SNAKE_CASE : List[Any] = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_enc_dec_attention_module["key"]["kernel"]
_SCREAMING_SNAKE_CASE : Any = tax_enc_dec_attention_module["out"]["kernel"]
_SCREAMING_SNAKE_CASE : Any = tax_enc_dec_attention_module["query"]["kernel"]
_SCREAMING_SNAKE_CASE : int = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
_SCREAMING_SNAKE_CASE : int = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
_SCREAMING_SNAKE_CASE : int = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_SCREAMING_SNAKE_CASE : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_SCREAMING_SNAKE_CASE : Any = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_SCREAMING_SNAKE_CASE : int = flax_model.params["decoder"]["block"][str(__lowerCamelCase )]["layer"]
_SCREAMING_SNAKE_CASE : Any = tax_attention_key
_SCREAMING_SNAKE_CASE : Optional[int] = tax_attention_out
_SCREAMING_SNAKE_CASE : Optional[Any] = tax_attention_query
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_attention_value
_SCREAMING_SNAKE_CASE : Optional[Any] = tax_pre_attention_layer_norm
_SCREAMING_SNAKE_CASE : Optional[int] = tax_enc_dec_attention_key
_SCREAMING_SNAKE_CASE : Dict = tax_enc_dec_attention_out
_SCREAMING_SNAKE_CASE : Dict = tax_enc_dec_attention_query
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_enc_dec_attention_value
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_cross_layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE : int = tax_mlp_wi_a
_SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wi_a
else:
_SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wi
_SCREAMING_SNAKE_CASE : List[str] = tax_mlp_wo
_SCREAMING_SNAKE_CASE : Any = txa_mlp_layer_norm
_SCREAMING_SNAKE_CASE : List[str] = flax_model_decoder_layer_block
# Decoder Normalization
_SCREAMING_SNAKE_CASE : Optional[int] = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
_SCREAMING_SNAKE_CASE : List[str] = txa_decoder_norm
# Only for layer 0:
_SCREAMING_SNAKE_CASE : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_SCREAMING_SNAKE_CASE : Tuple = tax_decoder_rel_embedding
# Token Embeddings
_SCREAMING_SNAKE_CASE : Tuple = tax_model["target"]["token_embedder"]["embedding"]
_SCREAMING_SNAKE_CASE : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_SCREAMING_SNAKE_CASE : List[str] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
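    # Example invocation (the paths and config name below are placeholders, not
    # values from the original script):
    #
    #   python convert_t5x_checkpoint_to_flax.py \
    #       --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --config_name google/t5-v1_1-small \
    #       --flax_dump_folder_path ./flax_model
| 249 | 0 |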
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
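# Worked example (illustrative; not in the original file): F(12) = 144 is the
# first Fibonacci number with three digits, so:
#
#   assert fibonacci(12) == 144
#   assert fibonacci_digits_index(3) == 12
#   assert solution(3) == 12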
| 508 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled with red blocks
    of minimum length 3 separated by at least one grey square."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
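# Worked example (illustrative; not in the original file): a row of length 3
# admits exactly two arrangements (all grey, or one red block of length 3),
# and a row of length 7 admits seventeen, matching the Project Euler 114 statement:
#
#   assert solution(3) == 2
#   assert solution(7) == 17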
| 508 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
torch.manual_seed(0 )
a__ : Tuple = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
a__ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
a__ : Optional[int] = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
a__ : Dict = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__lowercase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
a__ : List[str] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components(self):
torch.manual_seed(0 )
a__ : Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
a__ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
a__ : Union[str, Any] = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
a__ : int = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__lowercase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
a__ : List[str] = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
a__ : int = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
a__ : str = self.get_dummy_components()
a__ : Any = self.pipeline_class(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
a__ : Union[str, Any] = self.get_dummy_inputs(__lowercase )
a__ : str = inputs["""prompt"""]
a__ : List[Any] = inputs["""generator"""]
a__ : Optional[Any] = inputs["""num_inference_steps"""]
a__ : int = inputs["""output_type"""]
if "image" in inputs:
a__ : List[str] = inputs["""image"""]
else:
a__ : Optional[Any] = None
if "mask_image" in inputs:
a__ : Optional[int] = inputs["""mask_image"""]
else:
a__ : str = None
if "original_image" in inputs:
a__ : List[Any] = inputs["""original_image"""]
else:
a__ : Optional[int] = None
a__ , a__ : int = pipe.encode_prompt(__lowercase )
# inputs with prompt converted to embeddings
a__ : List[Any] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
a__ : Union[str, Any] = image
if mask_image is not None:
a__ : Dict = mask_image
if original_image is not None:
a__ : Tuple = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__lowercase , __lowercase , __lowercase )
a__ : List[str] = pipe(**__lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowercase )
a__ : Tuple = self.pipeline_class.from_pretrained(__lowercase )
pipe_loaded.to(__lowercase )
pipe_loaded.set_progress_bar_config(disable=__lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowercase , __lowercase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
a__ : str = self.get_dummy_inputs(__lowercase )
a__ : Dict = inputs["""generator"""]
a__ : str = inputs["""num_inference_steps"""]
a__ : Dict = inputs["""output_type"""]
# inputs with prompt converted to embeddings
a__ : Dict = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
a__ : List[str] = image
if mask_image is not None:
a__ : List[str] = mask_image
if original_image is not None:
a__ : Optional[Any] = original_image
a__ : str = pipe_loaded(**__lowercase )[0]
a__ : int = np.abs(to_np(__lowercase ) - to_np(__lowercase ) ).max()
self.assertLess(__lowercase , 1E-4 )
    def _test_save_load_local(self):
a__ : Optional[int] = self.get_dummy_components()
a__ : List[Any] = self.pipeline_class(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
a__ : Union[str, Any] = self.get_dummy_inputs(__lowercase )
a__ : Optional[Any] = pipe(**__lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowercase )
a__ : Any = self.pipeline_class.from_pretrained(__lowercase )
pipe_loaded.to(__lowercase )
pipe_loaded.set_progress_bar_config(disable=__lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
a__ : List[str] = self.get_dummy_inputs(__lowercase )
a__ : Tuple = pipe_loaded(**__lowercase )[0]
a__ : Optional[int] = np.abs(to_np(__lowercase ) - to_np(__lowercase ) ).max()
self.assertLess(__lowercase , 1E-4 )
| 136 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
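    # Usage note (illustrative; not part of the original file): with the lazy
    # module installed in sys.modules, importing the package stays cheap and the
    # torch-backed symbols are only imported on first attribute access, e.g.:
    #
    #   from transformers.models.bloom import BloomConfig  # no torch import yet
    #   config = BloomConfig()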
| 136 | 1 |
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Measure `qubits` entangled qubits 1000 times and return the counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
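# Example (illustrative; not part of the original file): with two entangled
# qubits only the |00> and |11> outcomes appear, each in roughly half of the
# 1000 shots:
#
#   counts = quantum_entanglement(2)
#   # e.g. {'00': 497, '11': 503}; the exact split varies from run to run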
| 556 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
training_set = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
test_set = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
test_image = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(64, 64)
)
test_image = tf.keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
# training_set.class_indices
# The sigmoid output is a probability in [0, 1]; threshold it at 0.5 instead
# of testing for exact equality with 0 or 1.
if result[0][0] < 0.5:
    prediction = """Normal"""
else:
    prediction = """Abnormality detected"""
| 556 | 1 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
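# Usage sketch (added): exactly one argument is zero and is solved for,
# using X_L = 2 * pi * f * L.
#   ind_reactance(35e-3, 1e3, 0)   # -> {'reactance': 219.91...}
#   ind_reactance(0, 1e3, 219.91)  # -> {'inductance': ~0.035}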
| 171 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the pair at the given bucket; return True on success."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 171 | 1 |
def sum_of_proper_divisors(input_num: int) -> int:
    """Sum every proper divisor (1..n//2) of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
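# Usage sketch (added): 6 and 28 are perfect numbers, so the sum of their
# proper divisors equals the number itself.
#   sum_of_proper_divisors(6)   # -> 1 + 2 + 3 = 6
#   sum_of_proper_divisors(28)  # -> 1 + 2 + 4 + 7 + 14 = 28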
| 711 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid if no adjacent vertex already carries it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
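# Usage sketch (added): a triangle (K3) needs three colors; two are not enough.
#   triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#   color(triangle, 3)  # -> a valid assignment such as [0, 1, 2]
#   color(triangle, 2)  # -> [] (no valid 2-coloring exists)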
| 691 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ =["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 616 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ =logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ ="RegNetConfig"
# Base docstring
UpperCAmelCase__ ="facebook/regnet-y-040"
UpperCAmelCase__ =[1, 1088, 7, 7]
# Image classification docstring
UpperCAmelCase__ ="facebook/regnet-y-040"
UpperCAmelCase__ ="tabby, tabby cat"
UpperCAmelCase__ =[
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
def __init__( self : int , A_ : int , A_ : int , A_ : int = 3 , A_ : int = 1 , A_ : int = 1 , A_ : Optional[str] = "relu" , ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Convad(
A_ , A_ , kernel_size=A_ , stride=A_ , padding=kernel_size // 2 , groups=A_ , bias=A_ , )
__lowercase = nn.BatchNormad(A_ )
__lowercase = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : Optional[int] ):
'''simple docstring'''
__lowercase = self.convolution(A_ )
__lowercase = self.normalization(A_ )
__lowercase = self.activation(A_ )
return hidden_state
class RegNetEmbeddings(nn.Module):
def __init__( self : Dict , A_ : RegNetConfig ):
'''simple docstring'''
super().__init__()
__lowercase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__lowercase = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : Optional[int] ):
'''simple docstring'''
__lowercase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
__lowercase = self.embedder(A_ )
return hidden_state
class RegNetShortCut(nn.Module):
def __init__( self : Optional[int] , A_ : int , A_ : int , A_ : int = 2 ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Convad(A_ , A_ , kernel_size=1 , stride=A_ , bias=A_ )
__lowercase = nn.BatchNormad(A_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : Tensor ):
'''simple docstring'''
__lowercase = self.convolution(A_ )
__lowercase = self.normalization(A_ )
return hidden_state
class RegNetSELayer(nn.Module):
def __init__( self : int , A_ : int , A_ : int ):
'''simple docstring'''
super().__init__()
__lowercase = nn.AdaptiveAvgPoolad((1, 1) )
__lowercase = nn.Sequential(
nn.Convad(A_ , A_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(A_ , A_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : List[Any] ):
'''simple docstring'''
__lowercase = self.pooler(A_ )
__lowercase = self.attention(A_ )
__lowercase = hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
def __init__( self : Any , A_ : RegNetConfig , A_ : int , A_ : int , A_ : int = 1 ):
'''simple docstring'''
super().__init__()
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
RegNetShortCut(A_ , A_ , stride=A_ ) if should_apply_shortcut else nn.Identity()
)
__lowercase = nn.Sequential(
RegNetConvLayer(A_ , A_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(A_ , A_ , stride=A_ , groups=A_ , activation=config.hidden_act ) , RegNetConvLayer(A_ , A_ , kernel_size=1 , activation=A_ ) , )
__lowercase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : Tuple ):
'''simple docstring'''
__lowercase = hidden_state
__lowercase = self.layer(A_ )
__lowercase = self.shortcut(A_ )
hidden_state += residual
__lowercase = self.activation(A_ )
return hidden_state
class RegNetYLayer(nn.Module):
def __init__( self : List[str] , A_ : RegNetConfig , A_ : int , A_ : int , A_ : int = 1 ):
'''simple docstring'''
super().__init__()
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
RegNetShortCut(A_ , A_ , stride=A_ ) if should_apply_shortcut else nn.Identity()
)
__lowercase = nn.Sequential(
RegNetConvLayer(A_ , A_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(A_ , A_ , stride=A_ , groups=A_ , activation=config.hidden_act ) , RegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(A_ , A_ , kernel_size=1 , activation=A_ ) , )
__lowercase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : Tuple ):
'''simple docstring'''
__lowercase = hidden_state
__lowercase = self.layer(A_ )
__lowercase = self.shortcut(A_ )
hidden_state += residual
__lowercase = self.activation(A_ )
return hidden_state
class RegNetStage(nn.Module):
def __init__( self : Optional[int] , A_ : RegNetConfig , A_ : int , A_ : int , A_ : int = 2 , A_ : int = 2 , ):
'''simple docstring'''
super().__init__()
__lowercase = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
__lowercase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
A_ , A_ , A_ , stride=A_ , ) , *[layer(A_ , A_ , A_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.layers(A_ )
return hidden_state
class RegNetEncoder(nn.Module):
def __init__( self : Tuple , A_ : RegNetConfig ):
'''simple docstring'''
super().__init__()
__lowercase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(A_ , config.depths[1:] ):
self.stages.append(RegNetStage(A_ , A_ , A_ , depth=A_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : Tensor , A_ : bool = False , A_ : bool = True ):
'''simple docstring'''
__lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
__lowercase = stage_module(A_ )
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def SCREAMING_SNAKE_CASE_ ( self : int , A_ : List[Any] ):
'''simple docstring'''
if isinstance(A_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : Dict , A_ : List[str]=False ):
'''simple docstring'''
if isinstance(A_ , A_ ):
__lowercase = value
UpperCAmelCase__ =R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ =R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , _a , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
def __init__( self : Any , A_ : Any ):
'''simple docstring'''
super().__init__(A_ )
__lowercase = config
__lowercase = RegNetEmbeddings(A_ )
__lowercase = RegNetEncoder(A_ )
__lowercase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : Tensor , A_ : Optional[bool] = None , A_ : Optional[bool] = None ):
'''simple docstring'''
__lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.embedder(A_ )
__lowercase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ )
__lowercase = encoder_outputs[0]
__lowercase = self.pooler(A_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , _a , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
def __init__( self : int , A_ : int ):
'''simple docstring'''
super().__init__(A_ )
__lowercase = config.num_labels
__lowercase = RegNetModel(A_ )
# classification head
__lowercase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.LongTensor] = None , A_ : Optional[bool] = None , A_ : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.regnet(A_ , output_hidden_states=A_ , return_dict=A_ )
__lowercase = outputs.pooler_output if return_dict else outputs[1]
__lowercase = self.classifier(A_ )
__lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowercase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowercase = """single_label_classification"""
else:
__lowercase = """multi_label_classification"""
if self.config.problem_type == "regression":
__lowercase = MSELoss()
if self.num_labels == 1:
__lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowercase = loss_fct(A_ , A_ )
elif self.config.problem_type == "single_label_classification":
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowercase = BCEWithLogitsLoss()
__lowercase = loss_fct(A_ , A_ )
if not return_dict:
__lowercase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
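# Hedged usage sketch (added; assumes network access to the Hub checkpoint and
# a PIL `image` in scope):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_label = model(**inputs).logits.argmax(-1).item()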
| 616 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: int = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """Configuration for a ViT model; defaults follow google/vit-base-patch16-224."""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
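# Usage sketch (added): build a config and inspect the ONNX export spec.
#   config = ViTConfig(image_size=384)
#   onnx_config = ViTOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict describing pixel_values axes
#   onnx_config.atol_for_validation  # 1e-4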
| 710 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the keys of postprocess() output, e.g. "generated_text".
    return_name = "generated"
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
super().__init__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def lowerCamelCase__ ( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,**UpperCAmelCase_ ,):
_lowercase : str = {}
if truncation is not None:
_lowercase : Dict = truncation
_lowercase : Any = generate_kwargs
_lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
_lowercase : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_lowercase : Tuple = return_type
if clean_up_tokenization_spaces is not None:
_lowercase : Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_lowercase : Optional[Any] = self.tokenizer.encode(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_lowercase : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
return True
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] ,UpperCAmelCase_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_lowercase : int = ([prefix + arg for arg in args[0]],)
_lowercase : Any = True
elif isinstance(args[0] ,UpperCAmelCase_ ):
_lowercase : Optional[int] = (prefix + args[0],)
_lowercase : List[Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_lowercase : List[Any] = self.tokenizer(*UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Union[str, Any] = super().__call__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
if (
isinstance(args[0] ,UpperCAmelCase_ )
and all(isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) for el in args[0] )
and all(len(UpperCAmelCase_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=TruncationStrategy.DO_NOT_TRUNCATE ,**UpperCAmelCase_ ):
_lowercase : List[str] = self._parse_and_tokenize(UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,**UpperCAmelCase_ )
return inputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if self.framework == "pt":
_lowercase , _lowercase : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_lowercase , _lowercase : Tuple = tf.shape(model_inputs["""input_ids"""] ).numpy()
_lowercase : List[str] = generate_kwargs.get("""min_length""" ,self.model.config.min_length )
_lowercase : Any = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
self.check_inputs(UpperCAmelCase_ ,generate_kwargs["""min_length"""] ,generate_kwargs["""max_length"""] )
_lowercase : List[str] = self.model.generate(**UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : str = output_ids.shape[0]
if self.framework == "pt":
_lowercase : List[str] = output_ids.reshape(UpperCAmelCase_ ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
_lowercase : Tuple = tf.reshape(UpperCAmelCase_ ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=ReturnType.TEXT ,UpperCAmelCase_=False ):
_lowercase : List[str] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_lowercase : List[str] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
UpperCAmelCase_ ,skip_special_tokens=UpperCAmelCase_ ,clean_up_tokenization_spaces=UpperCAmelCase_ ,)
}
records.append(UpperCAmelCase_ )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"
def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return super().__call__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,UpperCAmelCase_=TruncationStrategy.DO_NOT_TRUNCATE ,UpperCAmelCase_=None ,UpperCAmelCase_=None ):
if getattr(self.tokenizer ,"""_build_translation_inputs""" ,UpperCAmelCase_ ):
return self.tokenizer._build_translation_inputs(
*UpperCAmelCase_ ,return_tensors=self.framework ,truncation=UpperCAmelCase_ ,src_lang=UpperCAmelCase_ ,tgt_lang=UpperCAmelCase_ )
else:
return super()._parse_and_tokenize(*UpperCAmelCase_ ,truncation=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,**UpperCAmelCase_ ):
_lowercase , _lowercase , _lowercase : Any = super()._sanitize_parameters(**UpperCAmelCase_ )
if src_lang is not None:
_lowercase : List[Any] = src_lang
if tgt_lang is not None:
_lowercase : List[str] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_lowercase : Optional[int] = kwargs.get("""task""" ,self.task )
_lowercase : Union[str, Any] = task.split("""_""" )
if task and len(UpperCAmelCase_ ) == 4:
# translation, XX, to YY
_lowercase : Optional[Any] = items[1]
_lowercase : str = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return super().__call__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
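# Hedged usage sketch (added): these classes are normally reached through the
# high-level factory rather than instantiated directly:
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("Long article text ...", max_length=60)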
| 600 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
def __init__( self :str , __snake_case :str , __snake_case :Tuple=13 , __snake_case :List[str]=7 , __snake_case :List[str]=True , __snake_case :Dict=True , __snake_case :str=True , __snake_case :Optional[int]=True , __snake_case :Union[str, Any]=99 , __snake_case :List[str]=32 , __snake_case :Tuple=5 , __snake_case :Optional[int]=4 , __snake_case :Any=37 , __snake_case :Any="gelu" , __snake_case :Dict=0.1 , __snake_case :Union[str, Any]=0.1 , __snake_case :Optional[Any]=5_12 , __snake_case :int=16 , __snake_case :List[Any]=2 , __snake_case :str=0.02 , __snake_case :Dict=4 , ):
'''simple docstring'''
__magic_name__ : int =parent
__magic_name__ : Dict =batch_size
__magic_name__ : List[str] =seq_length
__magic_name__ : Optional[int] =is_training
__magic_name__ : Any =use_attention_mask
__magic_name__ : List[str] =use_token_type_ids
__magic_name__ : Any =use_labels
__magic_name__ : List[Any] =vocab_size
__magic_name__ : Optional[Any] =hidden_size
__magic_name__ : Tuple =num_hidden_layers
__magic_name__ : List[str] =num_attention_heads
__magic_name__ : int =intermediate_size
__magic_name__ : Optional[int] =hidden_act
__magic_name__ : str =hidden_dropout_prob
__magic_name__ : int =attention_probs_dropout_prob
__magic_name__ : str =max_position_embeddings
__magic_name__ : List[str] =type_vocab_size
__magic_name__ : Tuple =type_sequence_label_size
__magic_name__ : List[str] =initializer_range
__magic_name__ : Any =num_choices
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[int] =None
if self.use_attention_mask:
__magic_name__ : str =random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : Union[str, Any] =None
if self.use_token_type_ids:
__magic_name__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Any =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =config_and_inputs
__magic_name__ : Union[str, Any] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
def A__ ( self :List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__magic_name__ : Optional[Any] =model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__snake_case )
__magic_name__ : List[str] =model(np.ones((1, 1) ) )
self.assertIsNotNone(__snake_case )
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__magic_name__ : Tuple =jnp.array([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : str =model(__snake_case )[0]
__magic_name__ : int =5_00_00
__magic_name__ : Dict =(1, 6, vocab_size)
self.assertEqual(output.shape , __snake_case )
__magic_name__ : List[Any] =jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __snake_case , atol=1E-4 ) )
| 21 |
from __future__ import annotations
def __A(lowerCAmelCase , lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = sum(lowerCAmelCase )
create_state_space_tree(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return result
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> None:
"""simple docstring"""
if sum(lowerCAmelCase ) > max_sum or (remaining_nums_sum + sum(lowerCAmelCase )) < max_sum:
return
if sum(lowerCAmelCase ) == max_sum:
result.append(lowerCAmelCase )
return
for index in range(lowerCAmelCase , len(lowerCAmelCase ) ):
create_state_space_tree(
lowerCAmelCase , lowerCAmelCase , index + 1 , [*path, nums[index]] , lowerCAmelCase , remaining_nums_sum - nums[index] , )
lowerCamelCase__ = [3, 34, 4, 12, 5, 2]
lowerCamelCase__ = 9
lowerCamelCase__ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 612 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
def __init__( self : str , A : Any , A : Any=1_3 , A : Optional[int]=7 , A : List[Any]=False , A : str=True , A : Dict=False , A : Tuple=False , A : Optional[Any]=1_9 , A : str=3_2 , A : Optional[Any]=5 , A : int=4 , A : Union[str, Any]=3_7 , A : str="gelu" , A : Union[str, Any]=0.1 , A : str=0.1 , A : List[Any]=5_1_2 , A : Union[str, Any]=1_6 , A : int=2 , A : str=0.02 , A : int=3 , A : Tuple=4 , A : List[Any]=None , ):
'''simple docstring'''
a : Union[str, Any] = parent
a : Dict = batch_size
a : Any = seq_length
a : Optional[Any] = is_training
a : Optional[Any] = use_input_mask
a : Optional[Any] = use_token_type_ids
a : Union[str, Any] = use_labels
a : List[str] = vocab_size
a : Optional[int] = hidden_size
a : Tuple = num_hidden_layers
a : Dict = num_attention_heads
a : Optional[int] = intermediate_size
a : str = hidden_act
a : Tuple = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : List[str] = max_position_embeddings
a : List[Any] = type_vocab_size
a : Any = type_sequence_label_size
a : int = initializer_range
a : Optional[int] = num_labels
a : Union[str, Any] = num_choices
a : Dict = scope
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Optional[int] = None
if self.use_input_mask:
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
a : Dict = None
a : List[Any] = None
a : Optional[Any] = None
if self.use_labels:
a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : int = ids_tensor([self.batch_size] , self.num_choices )
a : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a : Optional[Any] = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=A , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def lowerCamelCase__ ( self : Tuple , A : List[str] , A : int , A : Dict , A : Optional[Any] , A : Optional[Any] , A : List[str] ):
'''simple docstring'''
a : Any = EsmForProteinFolding(config=A ).float()
model.to(A )
model.eval()
a : int = model(A , attention_mask=A )
a : str = model(A )
a : Union[str, Any] = model(A )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
a : str = self.prepare_config_and_inputs()
(
(
a
), (
a
), (
a
), (
a
), (
a
), (
a
),
) : int = config_and_inputs
a : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
@unittest.skip('Does not support attention outputs' )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('ESMFold only has one output format.' )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip('ESMFold does not support input chunking.' )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : Any = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
a : Optional[Any] = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
a : Union[str, Any] = model(A )['positions']
a : Union[str, Any] = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , A , atol=1E-4 ) )
| 118 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Combines CLIP image and text embeddings into the format the decoder consumes."""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
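# Shape sketch (added, illustrative numbers): with clip_embeddings_dim=768,
# time_embed_dim=512, cross_attention_dim=768 and clip_extra_context_tokens=4,
# image_embeddings (B, 768), prompt_embeds (B, 768) and hidden states
# (B, 77, 768) map to hidden states of shape (B, 4 + 77, 768) plus additive
# time embeddings of shape (B, 512).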
| 118 | 1 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
__a = character_freq_dict.get(a , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
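    # Example (added): "Momo" has counts {m: 2, o: 2} -> True; "python" has six
    # characters each appearing once -> more than one odd count -> False.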
| 528 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 528 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18,
        min_resolution=30, max_resolution=400, do_resize=True, size=None,
        do_center_crop=True, crop_size=None, do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A__ : str =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_flip_channel_order""" ) )
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : Any =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
A__ : List[str] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
# Initialize image_processing
A__ : int =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ : List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
A__ : Dict =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A__ : Union[str, Any] =image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
# Initialize image_processing
A__ : int =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
A__ : List[str] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A__ : int =image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
# Initialize image_processing
A__ : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
A__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A__ : int =image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 687 |
"""Feature extractor class for YOLOS, kept only for backward compatibility."""
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor

logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
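# Usage sketch (checkpoint name illustrative): constructing the deprecated class
# still works but emits the FutureWarning above.
#
#     feature_extractor = YolosFeatureExtractor.from_pretrained("hustvl/yolos-small")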
| 687 | 1 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any = logging.get_logger(__name__)
__snake_case : str = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'mvp'
SCREAMING_SNAKE_CASE = ['past_key_values']
SCREAMING_SNAKE_CASE = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self: str , _SCREAMING_SNAKE_CASE: int=5_0267 , _SCREAMING_SNAKE_CASE: str=1024 , _SCREAMING_SNAKE_CASE: str=12 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4096 , _SCREAMING_SNAKE_CASE: str=16 , _SCREAMING_SNAKE_CASE: List[Any]=12 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4096 , _SCREAMING_SNAKE_CASE: List[str]=16 , _SCREAMING_SNAKE_CASE: Optional[int]=0.0 , _SCREAMING_SNAKE_CASE: str=0.0 , _SCREAMING_SNAKE_CASE: Optional[Any]="gelu" , _SCREAMING_SNAKE_CASE: Dict=1024 , _SCREAMING_SNAKE_CASE: Optional[int]=0.1 , _SCREAMING_SNAKE_CASE: int=0.0 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.02 , _SCREAMING_SNAKE_CASE: Any=0.0 , _SCREAMING_SNAKE_CASE: Optional[Any]=False , _SCREAMING_SNAKE_CASE: Any=True , _SCREAMING_SNAKE_CASE: int=1 , _SCREAMING_SNAKE_CASE: str=0 , _SCREAMING_SNAKE_CASE: Optional[Any]=2 , _SCREAMING_SNAKE_CASE: Any=True , _SCREAMING_SNAKE_CASE: Dict=2 , _SCREAMING_SNAKE_CASE: Optional[Any]=2 , _SCREAMING_SNAKE_CASE: Optional[int]=False , _SCREAMING_SNAKE_CASE: int=100 , _SCREAMING_SNAKE_CASE: Dict=800 , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = vocab_size
__lowerCAmelCase : Union[str, Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = d_model
__lowerCAmelCase : List[Any] = encoder_ffn_dim
__lowerCAmelCase : List[str] = encoder_layers
__lowerCAmelCase : Optional[Any] = encoder_attention_heads
__lowerCAmelCase : Any = decoder_ffn_dim
__lowerCAmelCase : Dict = decoder_layers
__lowerCAmelCase : int = decoder_attention_heads
__lowerCAmelCase : Any = dropout
__lowerCAmelCase : str = attention_dropout
__lowerCAmelCase : Optional[int] = activation_dropout
__lowerCAmelCase : Optional[Any] = activation_function
__lowerCAmelCase : Any = init_std
__lowerCAmelCase : List[str] = encoder_layerdrop
__lowerCAmelCase : str = decoder_layerdrop
__lowerCAmelCase : Dict = classifier_dropout
__lowerCAmelCase : Dict = use_cache
__lowerCAmelCase : Tuple = encoder_layers
__lowerCAmelCase : str = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase : Optional[int] = use_prompt
__lowerCAmelCase : Optional[int] = prompt_length
__lowerCAmelCase : Tuple = prompt_mid_dim
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , forced_eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[str] = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"The config can simply be saved and uploaded again to be fixed.") | 293 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
@abstractmethod
def _SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE: ArgumentParser) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict:
"""simple docstring"""
raise NotImplementedError() | 293 | 1 |
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
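# Quick usage sketch (REPL-style; values illustrative, using only the classes above):
#
#     cll = CircularLinkedList()
#     cll.insert_tail(1)
#     cll.insert_tail(2)
#     cll.insert_head(0)
#     print(cll)            # 0->1->2
#     cll.delete_front()    # returns 0
#     len(cll)              # 2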
def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 319 |
"""Banker's algorithm: a resource-allocation and deadlock-avoidance algorithm."""
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # Sum each resource column of the allocated-resources table
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # need = maximum claim - currently allocated, per process
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
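# Usage sketch (using the class and test data above): any keyword set to True,
# e.g. `describe=True`, makes main() print the resource tables before running
# the safety simulation.
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)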
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |