import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


# The original name of this module-level flag was lost to identifier mangling
# in this dump; the value is preserved as-is.
_UNUSED_FLAG = False


class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    # Placeholder class kept from the original file; no fast tests are defined here.
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up the VRAM after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        # Round-trip the pipeline through save_pretrained/from_pretrained.
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
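# Illustrative aside (not part of the original file): the first test's equality
# check relies on torch's seeded-generator determinism: re-seeding with the
# same value reproduces the same random draws, hence identical latents.
import torch

g = torch.manual_seed(0)
x1 = torch.randn(2, 2, generator=g)
g = torch.manual_seed(0)
x2 = torch.randn(2, 2, generator=g)
assert torch.equal(x1, x2)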
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score for the player to move, over a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # At the bottom of the tree, return the leaf score.
    if depth == height:
        return scores[node_index]
    # The maximizer and minimizer alternate levels.
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
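# Quick worked check of the routine above (illustrative, not from the original
# file): with the eight leaf scores used in main(), the tree has depth 3 and
# the maximizer can secure a value of 65.
import math

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores), 2)  # 3.0
assert minimax(0, 0, True, scores, height) == 65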
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError

from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP request to emulate server down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")


class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello ๅ้")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"ๅ": {"้": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"ๅ": {"้": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even with inconsistent offsets, cut_text should produce valid string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
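# Illustrative usage of `transformers.tokenization_utils.Trie` (not from the
# original file), mirroring the assertions above: `split` performs
# longest-prefix matching over the added tokens.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
print(trie.split("[CLS] This is a extra_id_100"))
# Expected, per the tests above: ['[CLS]', ' This is a ', 'extra_id_100']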
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num!, computed recursively and memoized with lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
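# Illustrative checks (not part of the original module). Thanks to lru_cache,
# repeated calls reuse previously computed results.
assert factorial(0) == 1
assert factorial(5) == 120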
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'spiece.model'}
lowerCAmelCase_ = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class __A ( A_ ):
'''simple docstring'''
def __init__( self : str ,_snake_case : Tuple ,_snake_case : int=False ,_snake_case : Union[str, Any]=True ,_snake_case : List[Any]=False ,_snake_case : Dict="<s>" ,_snake_case : Optional[Any]="</s>" ,_snake_case : List[str]="<unk>" ,_snake_case : int="<sep>" ,_snake_case : List[Any]="<pad>" ,_snake_case : Union[str, Any]="<cls>" ,_snake_case : str="<mask>" ,_snake_case : int=["<eop>", "<eod>"] ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : Optional[int] ,) -> None:
"""simple docstring"""
lowercase__ : Union[str, Any] = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else mask_token
lowercase__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case ,remove_space=_snake_case ,keep_accents=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,additional_special_tokens=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : Any = 3
lowercase__ : Union[str, Any] = do_lower_case
lowercase__ : Optional[int] = remove_space
lowercase__ : Optional[Any] = keep_accents
lowercase__ : List[str] = vocab_file
lowercase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
lowercase__ : Union[str, Any] = jieba
lowercase__ : Tuple = str.maketrans(''' \n''' ,'''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ : List[str] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : Any = self.__dict__.copy()
lowercase__ : Optional[int] = None
return state
def __setstate__( self : List[str] ,_snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Dict = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if self.remove_space:
lowercase__ : str = ''' '''.join(inputs.strip().split() )
else:
lowercase__ : List[str] = inputs
lowercase__ : Tuple = outputs.replace('''``''' ,'''"''' ).replace('''\'\'''' ,'''"''' )
if not self.keep_accents:
lowercase__ : Tuple = unicodedata.normalize('''NFKD''' ,_snake_case )
lowercase__ : List[str] = ''''''.join([c for c in outputs if not unicodedata.combining(_snake_case )] )
if self.do_lower_case:
lowercase__ : List[str] = outputs.lower()
return outputs
def UpperCAmelCase ( self : int ,_snake_case : str ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = self.preprocess_text(_snake_case )
lowercase__ : int = self.sp_model.encode(_snake_case ,out_type=_snake_case )
lowercase__ : List[Any] = []
for piece in pieces:
if len(_snake_case ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowercase__ : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case ,'''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowercase__ : int = cur_pieces[1:]
else:
lowercase__ : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_snake_case )
else:
new_pieces.append(_snake_case )
return new_pieces
def UpperCAmelCase ( self : List[str] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.PieceToId(_snake_case )
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
return self.sp_model.IdToPiece(_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : int ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Tuple ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : Tuple = [self.sep_token_id]
lowercase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self : List[str] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1, 1]
return ([0] * len(_snake_case )) + [1, 1]
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : Tuple = [self.sep_token_id]
lowercase__ : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : List[Any] = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,'''wb''' ) as fi:
lowercase__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
def UpperCAmelCase ( self : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ : Union[str, Any] = super()._decode(*_snake_case ,**_snake_case )
lowercase__ : Tuple = text.replace(''' ''' ,'''''' ).replace('''\u2582''' ,''' ''' ).replace('''\u2583''' ,'''\n''' )
return text
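# Illustrative usage (not from the original file; assumes network access to
# the Hugging Face Hub). The checkpoint name comes from the vocab map above.
from transformers import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("ไฝ ๅฅฝไธ็")  # sample text; any string works
print(tokenizer.decode(ids))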
"""simple docstring"""
from typing import List
import numpy as np
def lowercase ( a__ : dict ) -> int:
_UpperCamelCase = {key: len(a__ ) for key, value in gen_kwargs.items() if isinstance(a__ , a__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_UpperCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , a__ )
def lowercase ( a__ : int , a__ : int ) -> List[range]:
_UpperCamelCase = []
for group_idx in range(a__ ):
_UpperCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_UpperCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_UpperCamelCase = range(a__ , start + num_shards_to_add )
shards_indices_per_group.append(a__ )
return shards_indices_per_group
def lowercase ( a__ : dict , a__ : int ) -> List[dict]:
_UpperCamelCase = _number_of_shards_in_gen_kwargs(a__ )
if num_shards == 1:
return [dict(a__ )]
else:
_UpperCamelCase = _distribute_shards(num_shards=a__ , max_num_jobs=a__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(a__ , a__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(a__ ) )
]
def lowercase ( a__ : List[dict] ) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , a__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowercase ( a__ : np.random.Generator , a__ : dict ) -> dict:
_UpperCamelCase = {len(a__ ) for value in gen_kwargs.values() if isinstance(a__ , a__ )}
_UpperCamelCase = {}
for size in list_sizes:
_UpperCamelCase = list(range(a__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_UpperCamelCase = dict(a__ )
for key, value in shuffled_kwargs.items():
if isinstance(a__ , a__ ):
_UpperCamelCase = [value[i] for i in indices_per_size[len(a__ )]]
return shuffled_kwargs
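# Worked examples for _distribute_shards (illustrative, not from the original
# file): shards are spread as evenly as possible, and empty groups are dropped.
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
assert _distribute_shards(num_shards=2, max_num_jobs=4) == [range(0, 1), range(1, 2)]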
"""Release utility: bump the version in all the places it is written down."""
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex for the given pattern key."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "๐ค Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First compute the default version: base version if on a dev release, bumped minor (or micro for a patch) otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
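# Illustrative (not from the original file): how the "init" pattern rewrites a
# version line, which is what update_version_in_file does for each target file.
pattern, template = REPLACE_PATTERNS["init"]
print(pattern.sub(template.replace("VERSION", "4.31.0"), '__version__ = "4.31.0.dev0"\n'))
# -> __version__ = "4.31.0"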
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "ใใใซใกใฏ",
            "ใใ",
            "ใซใกใฏ",
            "ใฐใใฏ",
            "##ใใ",
            "##ใซใกใฏ",
            "##ใฐใใฏ",
            "ไธ็",
            "##ไธ็",
            "ใ",
            "##ใ",
            "ใ",
            "##ใ",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ"
        output_text = "ใใใซใกใฏ ใ ไธ็ ใ ใใใฐใใฏ ใ ไธ็ ใ"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("ใใใซใกใฏใไธ็ใ\nใใใฐใใฏใไธ็ใ")
        self.assertListEqual(tokens, ["ใใใซใกใฏ", "ใ", "ไธ็", "ใ", "ใใ", "##ใฐใใฏ", "ใ", "ไธ็", "ใ"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "ใใใซใกใฏใไธ็ใ\nใใใฐใใฏใไธ็ใ"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["ใใใซใกใฏ", "ใ", "ไธ็", "ใ", "ใใ", "##ใฐใใฏ", "ใ", "ไธ็", "ใ"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["ใขใใใซในใใข", "ใง", "iPhone", "8", "ใ", "็บๅฃฒ", "ใ", "ใ", "ใ", "ใ"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["ใขใใใซ", "ในใใข", "ใง", "iPhone", "8", "ใ", "็บๅฃฒ", "ใ", "ใ", "ใ", "ใ"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["ใขใใใซ", "ในใใข", "ใง", "iPhone", "8", "ใ", "็บๅฃฒ", "ใ", "ใ", "ใ", "ใ"],
        )

    def test_mecab_tokenizer_lowercase(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["ใขใใใซในใใข", "ใง", "iphone", "8", "ใ", "็บๅฃฒ", "ใ", "ใ", "ใ", "ใ"],
        )

    def test_mecab_tokenizer_with_option(self):
        # The boolean flag values below are reconstructed from the expected
        # output (identifier mangling in this dump erased the originals).
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใข", "ใง", "iPhone", "๏ผ", "ใ", "็บๅฃฒ", "ใ", "ใใ", "\u3000", "ใ"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใข", "ใง", "iPhone", "๏ผ", "ใ", "็บๅฃฒ", "ใ", "ใ", "ใ", "ใ", "ใ"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "ใใใซใกใฏใไธ็ใ\nใใใฐใใฏใไธ็ใ"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["ใใใซใกใฏ", "ใ", "ไธ็", "ใ", "ใใ", "##ใฐใใฏ", "ใ", "ไธ็", "ใ"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            [" ", "\t", "ใขใใใซ", "ในใใข", "ใง", "iPhone", "8", " ", "ใ", " ", " ", "\n ", "็บๅฃฒ", "ใ", "ใ", "ใ", " ", "ใ", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("ๅคๅฝไบบๅๆฟๆจฉ"), ["ๅคๅฝ", "ไบบ", "ๅๆฟ", "ๆจฉ"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("ๅคๅฝไบบๅๆฟๆจฉ"), ["ๅคๅฝไบบ", "ๅๆฟๆจฉ"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("ๅคๅฝไบบๅๆฟๆจฉ"), ["ๅคๅฝไบบๅๆฟๆจฉ"])

    @require_sudachi
    def test_sudachi_tokenizer_lowercase(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            [" ", "\t", "ใขใใใซ", "ในใใข", "ใง", "iphone", "8", " ", "ใ", " ", " ", "\n ", "็บๅฃฒ", "ใ", "ใ", "ใ", " ", "ใ", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            [" ", "\t", "๏ฝฑ๏ฝฏ๏พ๏พ๏พ", "ในใใข", "ใง", "iPhone", "๏ผ", " ", "ใ", " ", " ", "\n ", "็บๅฃฒ", "ใ", "ใ", "ใ", "\u3000", "ใ", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["ใขใใใซ", "ในใใข", "ใง", "iPhone", "8", "ใ", "็บๅฃฒ", "ใ", "ใ", "ใ", "ใ"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "ใใใซใกใฏใไธ็ใ\nใใใฐใใฏใไธ็ใ"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["ใใใซใกใฏ", "ใ", "ไธ็", "ใ", "ใใ", "##ใฐใใฏ", "ใ", "ไธ็", "ใ"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["ใขใใใซ", "ในใใข", "ใง", "iPhone", "8", "\u3000", "ใ", "\u3000", "\u3000", "\u3000", "็บๅฃฒ", "ใ", "ใใ", "\u3000", "ใ"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lowercase(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["ใขใใใซ", "ในใใข", "ใง", "iphone", "8", "\u3000", "ใ", "\u3000", "\u3000", "\u3000", "็บๅฃฒ", "ใ", "ใใ", "\u3000", "ใ"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["๏ฝฑ", "๏ฝฏ", "๏พ", "๏พ", "๏พ", "ในใใข", "ใง", "iPhone", "๏ผ", "\u3000", "ใ", "\u3000", "\u3000", "\u3000", "็บๅฃฒ", "ใ", "ใใ", "\u3000", "ใ"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ "),
            ["ใขใใใซ", "ในใใข", "ใง", "iPhone", "8", "ใ", "็บๅฃฒ", "ใ", "ใใ", "ใ"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ใใใใจใใใใใพใm(_ _)๏ฝ่ฆใคใใใฎใๅคงๅคใงใใ"),
            ["ใใใใจใ", "ใใใใพใ", "m(_ _)m", "่ฆใคใใ", "ใฎ", "ใ", "ๅคงๅคใงใ", "ใ"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "ใใใซใกใฏ", "ใใ", "ใซใกใฏ", "ใฐใใฏ", "##ใใ", "##ใซใกใฏ", "##ใฐใใฏ"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("ใใใซใกใฏ"), ["ใใใซใกใฏ"])
        self.assertListEqual(tokenizer.tokenize("ใใใฐใใฏ"), ["ใใ", "##ใฐใใฏ"])
        self.assertListEqual(tokenizer.tokenize("ใใใฐใใฏ ใใใฐใใซใกใฏ ใใใซใกใฏ"), ["ใใ", "##ใฐใใฏ", "[UNK]", "ใใใซใกใฏ"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("ๅฝๅข ใฎ ้ทใ ใใณใใซ ใ ๆใใ ใจ ้ชๅฝ ใงใใฃใ ใ")
        self.assertListEqual(tokens, ["โๅฝๅข", "โใฎ", "โ้ทใ", "โใใณใใซ", "โใ", "โๆใใ", "โใจ", "โ้ช", "ๅฝ", "โใงใใฃใ", "โใ"])

        tokens = subword_tokenizer.tokenize("ใใใฐใใฏ ใใใฐใ ใซใก ใฏ ใใใซใกใฏ")
        self.assertListEqual(tokens, ["โใใ", "ใฐใ", "ใฏ", "โใใ", "ใฐใ", "โใซ", "ใก", "โใฏ", "โใใใซใกใฏ"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ใใใใจใใ", add_special_tokens=False)
        text_2 = tokenizer.encode("ใฉใใใใใพใใฆใ", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "ใ", "ใ", "ใซ", "ใก", "ใฏ", "ใฐ", "ไธ", "็", "ใ", "ใ"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ"
        output_text = "ใ ใ ใซ ใก ใฏ ใ ไธ ็ ใ ใ ใ ใฐ ใ ใฏ ใ ไธ ็ ใ"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ")
        self.assertListEqual(
            tokens, ["ใ", "ใ", "ใซ", "ใก", "ใฏ", "ใ", "ไธ", "็", "ใ", "ใ", "ใ", "ใฐ", "ใ", "ใฏ", "ใ", "ไธ", "็", "ใ"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "ใ", "ใ", "ใซ", "ใก", "ใฏ", "ใฐ", "ไธ", "็", "ใ", "ใ"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("ใใใซใกใฏ"), ["ใ", "ใ", "ใซ", "ใก", "ใฏ"])
        self.assertListEqual(tokenizer.tokenize("ใใใซใกใป"), ["ใ", "ใ", "ใซ", "ใก", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ใใใใจใใ", add_special_tokens=False)
        text_2 = tokenizer.encode("ใฉใใใใใพใใฆใ", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
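# Illustrative usage (not from the original file; assumes the MeCab
# dependencies used by the default word tokenizer, e.g. fugashi + ipadic,
# are installed and the Hub is reachable).
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("ใใใซใกใฏใไธ็ใ"))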
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
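# Illustrative (assumes the class above, not part of the original file): a
# ResNet-50-style configuration exposing the last stage to downstream heads.
config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck", out_features=["stage4"])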
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # The embedding size defaults to the hidden size unless overridden.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
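# Illustrative (assumes the classes above, not part of the original file): the
# ONNX export declares batch/sequence dynamic axes for all three inputs.
config = RoFormerConfig()
onnx_config = RoFormerOnnxConfig(config)
print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask', 'token_type_ids']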
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class A ( __UpperCAmelCase ):
__snake_case = 'gpt_neox'
def __init__( self, UpperCamelCase__=5_0432, UpperCamelCase__=6144, UpperCamelCase__=44, UpperCamelCase__=64, UpperCamelCase__=2_4576, UpperCamelCase__="gelu", UpperCamelCase__=0.25, UpperCamelCase__=1_0000, UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.1, UpperCamelCase__=2048, UpperCamelCase__=0.02, UpperCamelCase__=1E-5, UpperCamelCase__=True, UpperCamelCase__=0, UpperCamelCase__=2, UpperCamelCase__=False, UpperCamelCase__=True, UpperCamelCase__=None, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = rotary_pct
lowerCAmelCase_ = rotary_emb_base
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = hidden_dropout
lowerCAmelCase_ = classifier_dropout
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = tie_word_embeddings
lowerCAmelCase_ = use_parallel_residual
lowerCAmelCase_ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, UpperCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}" )
lowerCAmelCase_ = self.rope_scaling.get('''type''', UpperCamelCase__ )
lowerCAmelCase_ = self.rope_scaling.get('''factor''', UpperCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(UpperCamelCase__, UpperCamelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 167 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :str = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :str = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
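# All of the placeholder classes above implement the dummy-object pattern:
# constructing one immediately calls requires_backends, which raises an
# ImportError telling the user to install the missing backend. Sketch:
#
#   obj = UpperCAmelCase()   # ImportError: ... requires the sentencepiece library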
| 177 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str , A__ : Any , A__ : Dict , A__ : Any=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : Dict , A__ : int , A__ : List[str] , A__ : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : str , A__ : Dict , A__ : Optional[Any] , A__ : Optional[int]=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : int , A__ : Any , A__ : Union[str, Any] , A__ : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__lowerCamelCase = (
os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , )
__lowerCamelCase = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
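# The four helpers above all carry the obfuscated name `lowerCamelCase__`; in
# accelerate they correspond to save_fsdp_model, load_fsdp_model,
# save_fsdp_optimizer and load_fsdp_optimizer (an assumption inferred from
# their bodies). Hypothetical checkpointing sketch:
#
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, 0)
#   # ...later, to resume:
#   load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, 0)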
| 12 | 0 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd( n ):
    for i in range(0 , n ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(""" """ , end="""""" )
for _ in range(0 , i + 1 ): # printing stars
print("""* """ , end="""""" )
print()
def reverse_floyd( n ):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
print("""* """ , end="""""" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(""" """ , end="""""" )
def pretty_print( n ):
if n <= 0:
print(""" ... .... nothing printing :(""" )
return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
while K:
        user_number = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 46 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : List[str] = """โ"""
_a : Optional[int] = {"""vocab_file""": """spiece.model"""}
_a : int = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
_a : int = {
"""google/pegasus-xsum""": 5_1_2,
}
_a : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( PreTrainedTokenizer ):
    a : List[Any] =VOCAB_FILES_NAMES
a : Any =PRETRAINED_VOCAB_FILES_MAP
a : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] =["""input_ids""", """attention_mask"""]
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<pad>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE="<mask_2>",__SCREAMING_SNAKE_CASE="<mask_1>",__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=1_03,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = offset
if additional_special_tokens is not None:
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
raise TypeError(
f'additional_special_tokens should be of type {type(__SCREAMING_SNAKE_CASE )}, but is'
f' {type(__SCREAMING_SNAKE_CASE )}' )
__lowerCAmelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(__SCREAMING_SNAKE_CASE ),self.offset - 1 )
]
if len(set(__SCREAMING_SNAKE_CASE ) ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
__lowerCAmelCase = additional_special_tokens_extended
else:
__lowerCAmelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2,self.offset )]
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,mask_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,mask_token_sent=__SCREAMING_SNAKE_CASE,offset=__SCREAMING_SNAKE_CASE,additional_special_tokens=__SCREAMING_SNAKE_CASE,sp_model_kwargs=self.sp_model_kwargs,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = mask_token_sent
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# add special tokens to encoder dict
__lowerCAmelCase = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1,self.offset - 1 )} )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.offset
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self,"""sp_model_kwargs""" ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE,out_type=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__lowerCAmelCase = self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE )
return sp_id + self.offset
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__lowerCAmelCase = self.sp_model.IdToPiece(index - self.offset )
return token
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__lowerCAmelCase = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
return 1
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(__SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(__SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE,"""wb""" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
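# Round-trip sketch (assumes the public class name PegasusTokenizer and a
# local SentencePiece file "spiece.model"; both are assumptions):
#
#   tok = PegasusTokenizer("spiece.model")
#   ids = tok("Sample text").input_ids    # ends with eos_token_id (id 1 above)
#   tok.decode(ids, skip_special_tokens=True)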
| 46 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def snake_case_ ( A_ : str = "isbn/0140328726" ):
'''simple docstring'''
_lowerCamelCase : List[str] = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
_lowerCamelCase : Tuple = F'''{olid} is not a valid Open Library olid'''
raise ValueError(A_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def summarize_book( ol_book_data : dict ):
    '''simple docstring'''
    desired_keys = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value, list ):
            data[key] = ''', '''.join(value )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 72 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_UpperCamelCase: List[Any] = logging.getLogger(__name__)
@dataclass
class a__ :
_lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Whether tp freeze the encoder.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class a__ :
_lowerCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
_lowerCamelCase = field(
default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
_lowerCamelCase = field(
default=1_024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
_lowerCamelCase = field(
default=128, metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
_lowerCamelCase = field(
default=142, metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
}, )
_lowerCamelCase = field(
default=142, metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
_lowerCamelCase = field(default=-1, metadata={'help': '# training examples. -1 means use all.'} )
_lowerCamelCase = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'} )
_lowerCamelCase = field(default=-1, metadata={'help': '# test examples. -1 means use all.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Source language id for translation.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Target language id for translation.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': '# num_beams to use for evaluation.'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
logger.info(f'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(f''' {key} = {metrics[key]}''' )
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , f'''{split}_results.json''' ) )
def lowercase__ ( ) -> Optional[int]:
'''simple docstring'''
lowercase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : int = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
assert hasattr(_UpperCAmelCase , _UpperCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
lowercase : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_UpperCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase : Optional[int] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : Optional[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_UpperCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase : Dict = SeqaSeqDataset
# Get datasets
lowercase : int = (
dataset_class(
_UpperCAmelCase , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
lowercase : str = (
dataset_class(
_UpperCAmelCase , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase : Optional[Any] = (
dataset_class(
_UpperCAmelCase , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase : List[Any] = (
build_compute_metrics_fn(data_args.task , _UpperCAmelCase ) if training_args.predict_with_generate else None
)
lowercase : List[Any] = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , data_collator=SeqaSeqDataCollator(
_UpperCAmelCase , _UpperCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
lowercase : List[Any] = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
lowercase : Union[str, Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase : List[str] = train_result.metrics
lowercase : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase : Tuple = trainer.evaluate(metric_key_prefix='val' )
lowercase : Dict = data_args.n_val
lowercase : Tuple = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
lowercase : List[Any] = trainer.predict(test_dataset=_UpperCAmelCase , metric_key_prefix='test' )
lowercase : str = test_output.metrics
lowercase : Dict = data_args.n_test
if trainer.is_world_process_zero():
lowercase : Tuple = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
if training_args.predict_with_generate:
lowercase : str = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
lowercase : Tuple = lmap(str.strip , _UpperCAmelCase )
write_txt_file(_UpperCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_UpperCAmelCase , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
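# Hypothetical invocation of this script (flag names come from the dataclasses
# above; the script filename, model id and paths are placeholders):
#
#   python finetune_trainer.py \
#       --model_name_or_path google/pegasus-xsum \
#       --data_dir ./xsum_data --output_dir ./output \
#       --do_train --do_eval --predict_with_generate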
| 255 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
# TODO Update this
__snake_case ={
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase_ ( PretrainedConfig ):
lowerCamelCase : Union[str, Any] = '''esm'''
def __init__( self : List[Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[Any]=7_6_8 , UpperCAmelCase__ : int=1_2 , UpperCAmelCase__ : str=1_2 , UpperCAmelCase__ : List[Any]=3_0_7_2 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : str=1_0_2_6 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Any=1E-12 , UpperCAmelCase__ : Dict="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Tuple=None , **UpperCAmelCase__ : List[str] , ) -> str:
super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = emb_layer_norm_before
lowerCAmelCase = token_dropout
lowerCAmelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
lowerCAmelCase = EsmFoldConfig()
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = EsmFoldConfig(**UpperCAmelCase__ )
lowerCAmelCase = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
lowerCAmelCase = get_default_vocab_list()
else:
lowerCAmelCase = vocab_list
else:
lowerCAmelCase = None
lowerCAmelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , UpperCAmelCase__ ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
lowerCAmelCase = super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase__ ):
lowerCAmelCase = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : str = None
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : float = 0
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : int = 128
lowerCamelCase : "TrunkConfig" = None
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
if self.trunk is None:
lowerCAmelCase = TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase__ ):
lowerCAmelCase = TrunkConfig(**self.trunk )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = asdict(self )
lowerCAmelCase = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : int = 48
lowerCamelCase : int = 1_024
lowerCamelCase : int = 128
lowerCamelCase : int = 32
lowerCamelCase : int = 32
lowerCamelCase : int = 32
lowerCamelCase : float = 0
lowerCamelCase : float = 0
lowerCamelCase : bool = False
lowerCamelCase : int = 4
lowerCamelCase : Optional[int] = 128
lowerCamelCase : "StructureModuleConfig" = None
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
if self.structure_module is None:
lowerCAmelCase = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase__ ):
lowerCAmelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowerCAmelCase = self.sequence_state_dim // self.sequence_head_width
lowerCAmelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = asdict(self )
lowerCAmelCase = self.structure_module.to_dict()
return output
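# Sketch of the head-width constraint validated above (dataclass names are
# obfuscated; in transformers these are EsmConfig, EsmFoldConfig, TrunkConfig
# and StructureModuleConfig — an assumption). With the defaults:
#
#   sequence_state_dim=1024, sequence_head_width=32
#   sequence_num_heads = 1024 // 32 = 32, and 32 * 32 == 1024, so it validates;
#   sequence_state_dim=1000 would fail the divisibility check.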
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : int = 384
lowerCamelCase : int = 128
lowerCamelCase : int = 16
lowerCamelCase : int = 128
lowerCamelCase : int = 12
lowerCamelCase : int = 4
lowerCamelCase : int = 8
lowerCamelCase : float = 0.1
lowerCamelCase : int = 8
lowerCamelCase : int = 1
lowerCamelCase : int = 2
lowerCamelCase : int = 7
lowerCamelCase : int = 10
lowerCamelCase : float = 1E-8
lowerCamelCase : float = 1E5
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
return asdict(self )
def a_ ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 367 |
'''simple docstring'''
def average_absolute_deviation( nums : list[int] ) -> float:
    if not nums: # Makes sure that the list is not empty
        raise ValueError('List is empty' )
    average = sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
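# Worked example: for [1, 2, 3, 4] the average is 2.5, the absolute deviations
# are 1.5, 0.5, 0.5, 1.5, and their mean is 1.0:
#
#   assert average_absolute_deviation([1, 2, 3, 4]) == 1.0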
| 55 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
snake_case_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
A_ : Tuple = ['pixel_values']
def __init__(self : List[Any] , a__ : Optional[int] = True , a__ : Any = None , a__ : Optional[Any] = PILImageResampling.BICUBIC , a__ : Dict = True , a__ : int = None , a__ : Optional[Any] = True , a__ : int = 1 / 255 , a__ : Union[str, Any] = True , a__ : List[str] = None , a__ : Union[str, Any] = None , a__ : Dict = True , **a__ : str , ):
"""simple docstring"""
super().__init__(**a__ )
__snake_case = size if size is not None else {"""shortest_edge""": 224}
__snake_case = get_size_dict(a__ , default_to_square=a__ )
__snake_case = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__snake_case = get_size_dict(a__ , default_to_square=a__ , param_name='''crop_size''' )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case = do_convert_rgb
def a (self : List[Any] , a__ : Optional[int] , a__ : str , a__ : List[str] = PILImageResampling.BICUBIC , a__ : Dict = None , **a__ : List[Any] , ):
"""simple docstring"""
__snake_case = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__snake_case = get_resize_output_image_size(a__ , size=size['''shortest_edge'''] , default_to_square=a__ )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def a (self : Union[str, Any] , a__ : Optional[Any] , a__ : int , a__ : Dict = None , **a__ : Optional[Any] , ):
"""simple docstring"""
__snake_case = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
def a (self : str , a__ : Any , a__ : Tuple , a__ : Dict = None , **a__ : Optional[Any] , ):
"""simple docstring"""
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def a (self : Optional[Any] , a__ : str , a__ : Union[str, Any] , a__ : Dict , a__ : Union[str, Any] = None , **a__ : Optional[int] , ):
"""simple docstring"""
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def a (self : Any , a__ : Optional[int] , a__ : Optional[int] = None , a__ : Union[str, Any] = None , a__ : int = None , a__ : List[str] = None , a__ : str = None , a__ : List[Any] = None , a__ : Any = None , a__ : Tuple = None , a__ : Dict = None , a__ : List[str] = None , a__ : Any = None , a__ : List[Any] = None , a__ : Tuple = ChannelDimension.FIRST , **a__ : List[str] , ):
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(a__ , param_name='''size''' , default_to_square=a__ )
__snake_case = resample if resample is not None else self.resample
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(a__ , param_name='''crop_size''' , default_to_square=a__ )
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case = [convert_to_rgb(a__ ) for image in images]
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(a__ ) for image in images]
if do_resize:
__snake_case = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(image=a__ , size=a__ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
__snake_case = [to_channel_dimension_format(a__ , a__ ) for image in images]
__snake_case = {"""pixel_values""": images}
return BatchFeature(data=a__ , tensor_type=a__ )
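# Usage sketch (the processor above mirrors transformers' CLIPImageProcessor,
# an assumption suggested by the OPENAI_CLIP_MEAN/STD defaults; method names
# here are obfuscated, so this shows the original public API):
#
#   processor = CLIPImageProcessor()                 # resize to 224, crop 224x224
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape                      # torch.Size([1, 3, 224, 224])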
| 24 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ ( DiffusionPipeline ):
def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def _lowercase( self , A = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def _lowercase( self ) -> Dict:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]:
if isinstance(A , A ):
UpperCAmelCase : List[str] = 1
elif isinstance(A , A ):
UpperCAmelCase : Dict = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
UpperCAmelCase : List[str] = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCAmelCase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape
UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 )
UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase : List[str]
if negative_prompt is None:
UpperCAmelCase : Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
UpperCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase : Any = negative_prompt
UpperCAmelCase : Dict = text_input_ids.shape[-1]
UpperCAmelCase : List[Any] = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : int = uncond_embeddings.shape[1]
UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 )
UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase : Dict = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
UpperCAmelCase : int = torch.randn(
A , generator=A , device=self.device , dtype=A )
UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase : Optional[Any] = latents_reference.to(self.device )
UpperCAmelCase : Tuple = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx
UpperCAmelCase : List[str] = 0 if dy < 0 else dy
UpperCAmelCase : Union[str, Any] = max(-dx , 0 )
UpperCAmelCase : List[Any] = max(-dy , 0 )
UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Optional[Any] = {}
if accepts_eta:
UpperCAmelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 )
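                # classifier-free guidance: extrapolate from the unconditional prediction
                # toward the text-conditioned one, scaled by guidance_scale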
UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
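        # scale and decode the image latents with the VAE; 1 / 0.18215 undoes the
        # latent scaling factor applied when the latents were produced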
UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents
UpperCAmelCase : Tuple = self.vae.decode(A ).sample
UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
UpperCAmelCase , UpperCAmelCase : int = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase : Any = None
if output_type == "pil":
UpperCAmelCase : int = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 265 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __a( __A ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = 1.0 if scale is None else scale
UpperCAmelCase_ : str = 0.0 if loc is None else loc
super().__init__(__lowercase ,[AffineTransform(loc=self.loc ,scale=self.scale ,event_dim=__lowercase )] )
@property
def a__ ( self ) -> Dict:
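        # mean of an affine transform: E[a*X + b] = a*E[X] + b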
return self.base_dist.mean * self.scale + self.loc
@property
def a__ ( self ) -> Dict:
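        # variance of an affine transform: Var(a*X + b) = a**2 * Var(X)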
return self.base_dist.variance * self.scale**2
@property
def a__ ( self ) -> Any:
return self.variance.sqrt()
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Tuple:
super().__init__(**__lowercase )
UpperCAmelCase_ : Optional[Any] = args_dim
UpperCAmelCase_ : Optional[Any] = nn.ModuleList([nn.Linear(__lowercase ,__lowercase ) for dim in args_dim.values()] )
UpperCAmelCase_ : int = domain_map
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCAmelCase_ : Tuple = [proj(__lowercase ) for proj in self.proj]
return self.domain_map(*__lowercase )
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : Optional[Any] = function
def a__ ( self ,_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ) -> Optional[int]:
return self.function(__lowercase ,*__lowercase )
class __a:
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
def __init__( self ,_SCREAMING_SNAKE_CASE = 1 ) -> str:
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Optional[Any] = {k: dim * self.args_dim[k] for k in self.args_dim}
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
if self.dim == 1:
return self.distribution_class(*__lowercase )
else:
return Independent(self.distribution_class(*__lowercase ) ,1 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]:
UpperCAmelCase_ : List[str] = self._base_distribution(__lowercase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__lowercase ,loc=__lowercase ,scale=__lowercase ,event_dim=self.event_dim )
@property
def a__ ( self ) -> Optional[int]:
return () if self.dim == 1 else (self.dim,)
@property
def a__ ( self ) -> Any:
return len(self.event_shape )
@property
def a__ ( self ) -> Any:
return 0.0
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
return ParameterProjection(
in_features=__lowercase ,args_dim=self.args_dim ,domain_map=LambdaLayer(self.domain_map ) ,)
def a__ ( self ,*_SCREAMING_SNAKE_CASE ) -> Optional[int]:
raise NotImplementedError()
@staticmethod
def a__ ( _SCREAMING_SNAKE_CASE ) -> int:
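        # squareplus maps the reals smoothly onto the positive reals:
        # f(x) = (x + sqrt(x**2 + 4)) / 2, an alternative to softplus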
return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0
class __a( __A ):
"""simple docstring"""
lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase = StudentT
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
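        # squareplus keeps scale strictly positive; shifting df above 2 guarantees
        # that the Student-t distribution has finite variance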
UpperCAmelCase_ : Dict = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ : int = 2.0 + cls.squareplus(__lowercase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __a( __A ):
"""simple docstring"""
lowerCAmelCase = {"loc": 1, "scale": 1}
lowerCAmelCase = Normal
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : List[Any] = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __a( __A ):
"""simple docstring"""
lowerCAmelCase = {"total_count": 1, "logits": 1}
lowerCAmelCase = NegativeBinomial
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : Union[str, Any] = cls.squareplus(__lowercase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__lowercase ,logits=__lowercase )
else:
return Independent(self.distribution_class(total_count=__lowercase ,logits=__lowercase ) ,1 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ) -> str:
UpperCAmelCase_, UpperCAmelCase_ : Any = distr_args
if scale is not None:
# See scaling property of Gamma.
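            # the NB mean is total_count * exp(logits), so adding log(scale) to the
            # logits rescales the mean by `scale`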
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 359 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=19 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=None ,) -> Dict:
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : Optional[int] = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Any = num_labels
UpperCAmelCase_ : Optional[Any] = num_choices
UpperCAmelCase_ : List[str] = scope
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : str = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> List[str]:
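        # minimal folding configuration (a 2-block trunk, ESM kept in fp32) to keep the test fast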
UpperCAmelCase_ : Union[str, Any] = EsmConfig(
vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=_SCREAMING_SNAKE_CASE ,esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} ,)
return config
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Optional[int] = EsmForProteinFolding(config=_SCREAMING_SNAKE_CASE ).float()
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : int = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
) : Optional[Any] = config_and_inputs
UpperCAmelCase_ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = False
lowerCAmelCase = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCAmelCase = ()
lowerCAmelCase = {} if is_torch_available() else {}
lowerCAmelCase = False
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = EsmFoldModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip('''Does not support attention outputs''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip
def a__ ( self ) -> Dict:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def a__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def a__ ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def a__ ( self ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Dict:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def a__ ( self ) -> str:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def a__ ( self ) -> int:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def a__ ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def a__ ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def a__ ( self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__ ( self ) -> List[Any]:
pass
@require_torch
class __a( _a ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : List[str] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
UpperCAmelCase_ : str = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )['''positions''']
UpperCAmelCase_ : List[str] = torch.tensor([2.58_28, 0.79_93, -10.93_34] ,dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
| 235 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = Github(os.environ["""GITHUB_TOKEN"""] )
_snake_case : int = g.get_repo("""huggingface/diffusers""" )
_snake_case : int = repo.get_issues(state="""open""" )
for issue in open_issues:
_snake_case : int = sorted(issue.get_comments() , key=lambda snake_case__ : i.created_at , reverse=__SCREAMING_SNAKE_CASE )
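        # comments are sorted newest-first, so comments[0] is the most recent one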
_snake_case : Union[str, Any] = comments[0] if len(__SCREAMING_SNAKE_CASE ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 64 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = tempfile.mkdtemp()
lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
            '的',
            '价',
            '格',
            '是',
            '15',
            '便',
            'alex',
            '##andra',
            '，',
            '。',
'-',
't',
'shirt',
]
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowercase = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'do_convert_rgb': True,
}
lowercase = os.path.join(self.tmpdirname , snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = self.get_image_processor()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
lowercase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case )
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
lowercase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case )
self.assertIsInstance(processor_fast.tokenizer , snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case )
self.assertIsInstance(processor_fast.image_processor , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
lowercase = self.get_image_processor(do_normalize=snake_case )
lowercase = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(snake_case , return_tensors='np' )
lowercase = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        lowercase = 'Alexandra，T-shirt的价格是15便士。'
lowercase = processor(text=snake_case )
lowercase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        lowercase = 'Alexandra，T-shirt的价格是15便士。'
lowercase = self.prepare_image_inputs()
lowercase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(snake_case )
lowercase = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        lowercase = 'Alexandra，T-shirt的价格是15便士。'
lowercase = self.prepare_image_inputs()
lowercase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 195 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
# fmt: off
UpperCamelCase : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase : Optional[int] = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
UpperCamelCase : Any = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase : Optional[Any] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.get_tokenizer()
UpperCamelCase : str = self.get_image_processor()
UpperCamelCase : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase : Optional[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase : List[str] = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCamelCase : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = self.get_image_processor()
UpperCamelCase : str = self.get_tokenizer()
UpperCamelCase : List[str] = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = self.prepare_image_inputs()
UpperCamelCase : Optional[int] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
UpperCamelCase : List[str] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.get_image_processor()
UpperCamelCase : List[Any] = self.get_tokenizer()
UpperCamelCase : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = '''lower newer'''
UpperCamelCase : List[Any] = processor(text=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.get_image_processor()
UpperCamelCase : Optional[Any] = self.get_tokenizer()
UpperCamelCase : Dict = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = '''lower newer'''
UpperCamelCase : str = self.prepare_image_inputs()
UpperCamelCase : Tuple = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
processor()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.get_image_processor()
UpperCamelCase : List[str] = self.get_tokenizer()
UpperCamelCase : Dict = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase : List[Any] = processor.batch_decode(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.get_image_processor()
UpperCamelCase : Union[str, Any] = self.get_tokenizer()
UpperCamelCase : Dict = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = '''lower newer'''
UpperCamelCase : Optional[Any] = self.prepare_image_inputs()
UpperCamelCase : Any = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 315 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 315 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
# Initialise PyTorch model
UpperCamelCase :Any = BertConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase :List[str] = BertForPreTraining(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 259 |
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
    # Breadth-first search over the residual graph; returns True if the sink t is reachable from the source s.
UpperCamelCase :Tuple = [False] * len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = []
queue.append(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :int = True
while queue:
UpperCamelCase :Optional[Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = True
UpperCamelCase :Optional[int] = u
return visited[t]
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ):
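    # Ford-Fulkerson with BFS path search (the Edmonds-Karp variant): repeatedly
    # push flow along augmenting paths in the residual graph until none remains.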
    # parent[] is filled by BFS and later used to reconstruct each augmenting path
UpperCamelCase :Optional[int] = [-1] * (len(SCREAMING_SNAKE_CASE__ ))
UpperCamelCase :Optional[int] = 0
while bfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Dict = float('''Inf''' )
UpperCamelCase :str = sink
while s != source:
# Find the minimum value in select path
UpperCamelCase :Optional[Any] = min(SCREAMING_SNAKE_CASE__ , graph[parent[s]][s] )
UpperCamelCase :Any = parent[s]
max_flow += path_flow
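        # walk the augmenting path again, decreasing forward residual capacities
        # and increasing the reverse ones by the flow just pushed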
UpperCamelCase :Tuple = sink
while v != source:
UpperCamelCase :List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCamelCase :Any = parent[v]
return max_flow
__snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__snake_case , __snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
| 259 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'biogpt'
def __init__( self : str , lowercase_ : List[str]=4_2384 , lowercase_ : str=1024 , lowercase_ : Optional[Any]=24 , lowercase_ : Any=16 , lowercase_ : int=4096 , lowercase_ : List[Any]="gelu" , lowercase_ : Any=0.1 , lowercase_ : int=0.1 , lowercase_ : Tuple=1024 , lowercase_ : Optional[Any]=0.0_2 , lowercase_ : Optional[Any]=1e-12 , lowercase_ : int=True , lowercase_ : Any=True , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : int=1 , lowercase_ : Optional[int]=0 , lowercase_ : str=2 , **lowercase_ : List[Any] , ):
UpperCamelCase__ : Any =vocab_size
UpperCamelCase__ : Optional[int] =max_position_embeddings
UpperCamelCase__ : List[str] =hidden_size
UpperCamelCase__ : Tuple =num_hidden_layers
UpperCamelCase__ : Tuple =num_attention_heads
UpperCamelCase__ : int =intermediate_size
UpperCamelCase__ : Optional[int] =hidden_act
UpperCamelCase__ : str =hidden_dropout_prob
UpperCamelCase__ : Union[str, Any] =attention_probs_dropout_prob
UpperCamelCase__ : List[Any] =initializer_range
UpperCamelCase__ : List[str] =layer_norm_eps
UpperCamelCase__ : Dict =scale_embedding
UpperCamelCase__ : str =use_cache
UpperCamelCase__ : List[Any] =layerdrop
UpperCamelCase__ : int =activation_dropout
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
| 157 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
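# map ONNX Runtime element-type strings to their NumPy dtype equivalents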
_SCREAMING_SNAKE_CASE : List[Any] = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class __a :
"""simple docstring"""
def __init__( self : Optional[Any] , lowercase_ : Tuple=None , **lowercase_ : int ):
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
UpperCamelCase__ : Optional[Any] =model
UpperCamelCase__ : str =kwargs.get('''model_save_dir''' , lowercase_ )
UpperCamelCase__ : int =kwargs.get('''latest_model_name''' , lowercase_ )
def __call__( self : Any , **lowercase_ : Any ):
        UpperCamelCase__ : str ={k: np.array(v ) for k, v in kwargs.items()}
return self.model.run(lowercase_ , lowercase_ )
@staticmethod
def _lowerCAmelCase ( lowercase_ : Union[str, Path] , lowercase_ : Dict=None , lowercase_ : Optional[Any]=None ):
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
UpperCamelCase__ : List[str] ='''CPUExecutionProvider'''
return ort.InferenceSession(lowercase_ , providers=[provider] , sess_options=lowercase_ )
def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Path] , lowercase_ : Optional[str] = None , **lowercase_ : Union[str, Any] ):
UpperCamelCase__ : Union[str, Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCamelCase__ : Tuple =self.model_save_dir.joinpath(self.latest_model_name )
UpperCamelCase__ : str =Path(lowercase_ ).joinpath(lowercase_ )
try:
shutil.copyfile(lowercase_ , lowercase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCamelCase__ : List[str] =self.model_save_dir.joinpath(lowercase_ )
if src_path.exists():
UpperCamelCase__ : List[str] =Path(lowercase_ ).joinpath(lowercase_ )
try:
shutil.copyfile(lowercase_ , lowercase_ )
except shutil.SameFileError:
pass
def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, os.PathLike] , **lowercase_ : int , ):
if os.path.isfile(lowercase_ ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(lowercase_ , exist_ok=lowercase_ )
# saving model weights/files
self._save_pretrained(lowercase_ , **lowercase_ )
@classmethod
def _lowerCAmelCase ( cls : List[str] , lowercase_ : Union[str, Path] , lowercase_ : Optional[Union[bool, str, None]] = None , lowercase_ : Optional[Union[str, None]] = None , lowercase_ : bool = False , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional["ort.SessionOptions"] = None , **lowercase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowercase_ ):
UpperCamelCase__ : Any =OnnxRuntimeModel.load_model(
os.path.join(lowercase_ , lowercase_ ) , provider=lowercase_ , sess_options=lowercase_ )
UpperCamelCase__ : List[str] =Path(lowercase_ )
# load model from hub
else:
# download model
UpperCamelCase__ : Tuple =hf_hub_download(
repo_id=lowercase_ , filename=lowercase_ , use_auth_token=lowercase_ , revision=lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , )
UpperCamelCase__ : Any =Path(lowercase_ ).parent
UpperCamelCase__ : List[Any] =Path(lowercase_ ).name
UpperCamelCase__ : Optional[int] =OnnxRuntimeModel.load_model(lowercase_ , provider=lowercase_ , sess_options=lowercase_ )
return cls(model=lowercase_ , **lowercase_ )
@classmethod
def _lowerCAmelCase ( cls : Dict , lowercase_ : Union[str, Path] , lowercase_ : bool = True , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , **lowercase_ : List[Any] , ):
UpperCamelCase__ : Dict =None
if len(str(lowercase_ ).split('''@''' ) ) == 2:
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =model_id.split('''@''' )
return cls._from_pretrained(
model_id=lowercase_ , revision=lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , use_auth_token=lowercase_ , **lowercase_ , )
| 157 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_SCREAMING_SNAKE_CASE : Tuple = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_SCREAMING_SNAKE_CASE : Tuple = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_SCREAMING_SNAKE_CASE : int = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_SCREAMING_SNAKE_CASE : List[Any] = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def UpperCamelCase ( self : List[str] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any]=0.9 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.5 ) -> int:
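        # meteor_score in NLTK >= 3.6.5 expects pre-tokenized input, hence the
        # explicit word_tokenize branch for newer versions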
if NLTK_VERSION >= version.Version('3.6.5' ):
lowerCamelCase_ = [
meteor_score.single_meteor_score(
word_tokenize(__SCREAMING_SNAKE_CASE ) , word_tokenize(__SCREAMING_SNAKE_CASE ) , alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , gamma=__SCREAMING_SNAKE_CASE )
for ref, pred in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
]
else:
lowerCamelCase_ = [
meteor_score.single_meteor_score(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , gamma=__SCREAMING_SNAKE_CASE )
for ref, pred in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
]
return {"meteor": np.mean(__SCREAMING_SNAKE_CASE )}
| 183 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_SCREAMING_SNAKE_CASE : List[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class a ( nn.Module ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
super().__init__()
lowerCamelCase_ = torchvision.models.resnetaaa(pretrained=__SCREAMING_SNAKE_CASE )
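        # drop the final average-pool and fully-connected head, keeping only the
        # convolutional trunk as a feature extractor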
lowerCamelCase_ = list(model.children() )[:-2]
lowerCamelCase_ = nn.Sequential(*__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Any:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
lowerCamelCase_ = self.pool(self.model(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ = torch.flatten(__SCREAMING_SNAKE_CASE , start_dim=2 )
lowerCamelCase_ = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class a ( __snake_case ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
lowerCamelCase_ = [json.loads(__SCREAMING_SNAKE_CASE ) for l in open(__SCREAMING_SNAKE_CASE )]
lowerCamelCase_ = os.path.dirname(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer
lowerCamelCase_ = labels
lowerCamelCase_ = len(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = transforms
def __len__( self : Any ) -> Any:
return len(self.data )
def __getitem__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
lowerCamelCase_ = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = sentence[0], sentence[1:-1], sentence[-1]
lowerCamelCase_ = sentence[: self.max_seq_length]
lowerCamelCase_ = torch.zeros(self.n_classes )
lowerCamelCase_ = 1
lowerCamelCase_ = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
lowerCamelCase_ = self.transforms(__SCREAMING_SNAKE_CASE )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCamelCase ( self : Dict ) -> Dict:
lowerCamelCase_ = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] ) -> str:
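    # pad every tokenized sentence in the batch to the longest length and build the
    # matching attention mask (1 for real tokens, 0 for padding)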
lowerCamelCase_ = [len(row['sentence'] ) for row in batch]
lowerCamelCase_ , lowerCamelCase_ = len(_lowerCamelCase ), max(_lowerCamelCase )
lowerCamelCase_ = torch.zeros(_lowerCamelCase , _lowerCamelCase , dtype=torch.long )
lowerCamelCase_ = torch.zeros(_lowerCamelCase , _lowerCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_lowerCamelCase , _lowerCamelCase ) ):
lowerCamelCase_ = input_row['sentence']
lowerCamelCase_ = 1
lowerCamelCase_ = torch.stack([row['image'] for row in batch] )
lowerCamelCase_ = torch.stack([row['label'] for row in batch] )
lowerCamelCase_ = torch.stack([row['image_start_token'] for row in batch] )
lowerCamelCase_ = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase__ ( ) -> List[str]:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase__ ( ) -> Union[str, Any]:
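    # resize + center-crop to 224x224, then normalize with per-channel mean/std statistics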
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 183 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case : str = logging.get_logger(__name__)
_snake_case : List[str] = {"vocab_file": "spiece.model"}
_snake_case : int = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
_snake_case : List[str] = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class a (a__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : str=False , lowerCamelCase : str=False , lowerCamelCase : Dict=False , lowerCamelCase : Optional[int]=None , lowerCamelCase : Optional[Any]=None , lowerCamelCase : str=None , lowerCamelCase : int=None , lowerCamelCase : Optional[int] = None , **lowerCamelCase : Union[str, Any] , ) -> None:
__snake_case : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__snake_case : Dict = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
__snake_case : Tuple = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__snake_case : str = '<|endoftext|>' if eos_token is None else eos_token
__snake_case : Tuple = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__snake_case : str = unk_token if pad_token is None else pad_token
__snake_case : List[str] = eos_token if bos_token is None else bos_token
else:
__snake_case : List[Any] = '<pad>' if pad_token is None else pad_token
__snake_case : Dict = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE_ , remove_space=SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
__snake_case : List[str] = do_lower_case
__snake_case : List[str] = remove_space
__snake_case : List[Any] = keep_accents
__snake_case : List[str] = vocab_file
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE_ )
# Used for whitespace normalization in input texts
        # fmt: off
        # Assorted Unicode whitespace / invisible characters, written as escapes because the
        # literal characters were mangled in transit (a few of the codepoints are inferred).
        __snake_case : Dict = {" ", "\u2000", "\u2009", "\u200a", "\u202f", "\u2028", "\u3000", "\u2002", "\u2003", "\u00a0", "\ufffc", "\u00ad"}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__snake_case : List[Any] = re.compile(
F'[{"".join(map(SCREAMING_SNAKE_CASE_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
def __getstate__( self : Any ) -> Tuple:
__snake_case : List[Any] = self.__dict__.copy()
__snake_case : Optional[int] = None
return state
def __setstate__( self : Dict , lowerCamelCase : List[str] ) -> Any:
__snake_case : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case : Optional[int] = {}
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __snake_case ( self : str ) -> int:
return len(self.sp_model )
def __snake_case ( self : int , lowerCamelCase : Optional[Any] ) -> str:
__snake_case : Dict = self.non_printing_characters_re.sub("" , SCREAMING_SNAKE_CASE_ )
# Normalize whitespaces
__snake_case : Any = ''.join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
__snake_case : Dict = unicodedata.normalize("NFC" , SCREAMING_SNAKE_CASE_ )
return text
def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any] , **lowerCamelCase : int ) -> List[str]:
__snake_case : Any = self.preprocess_text(SCREAMING_SNAKE_CASE_ )
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple , lowerCamelCase : Optional[int] ) -> int:
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple , lowerCamelCase : Union[str, Any] ) -> str:
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
@staticmethod
def __snake_case ( lowerCamelCase : List[str] ) -> str:
return out_string
def __snake_case ( self : str , lowerCamelCase : List[str] ) -> str:
__snake_case : Optional[Any] = []
__snake_case : List[Any] = ''
__snake_case : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
__snake_case : Dict = True
__snake_case : Optional[Any] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
__snake_case : str = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string
def __snake_case ( self : str ) -> Dict[str, int]:
__snake_case : Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : int = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case : List[str] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fi:
__snake_case : Any = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
def __snake_case ( self : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Any = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case : List[str] = self.preprocess_text(SCREAMING_SNAKE_CASE_ )
__snake_case : Optional[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE_ )
else:
__snake_case : Union[str, Any] = [self.preprocess_text(SCREAMING_SNAKE_CASE_ ) for t in text]
__snake_case : Any = self.sp_model.encode(SCREAMING_SNAKE_CASE_ )
if return_tensors is True or return_tensors == "pt":
__snake_case : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE_ )
return token_ids
def __snake_case ( self : int , lowerCamelCase : Dict ) -> str:
return self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : int , lowerCamelCase : Optional[Any] ) -> List[int]:
__snake_case : List[Any] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
__snake_case : Optional[Any] = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(SCREAMING_SNAKE_CASE_ ) + F'{self.bos_token}Bot:'
)
return self.encode(text=SCREAMING_SNAKE_CASE_ )
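# NOTE: self-contained sketch (hedged, not part of the tokenizer class above) of the
# preprocessing pipeline it implements: strip non-printing control characters, map
# exotic Unicode spaces to a plain space, then apply NFC normalization. `_SPACES`
# is an illustrative subset of the whitespace set.
import re
import unicodedata

_CONTROL_CHARS = list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]
_CONTROL_RE = re.compile("[" + "".join(map(chr, _CONTROL_CHARS)) + "]")
_SPACES = {"\u2009", "\u200a", "\u202f", "\u3000"}

def _preprocess(text: str) -> str:
    text = _CONTROL_RE.sub("", text)
    text = "".join(" " if ch in _SPACES else ch for ch in text)
    return unicodedata.normalize("NFC", text)

assert _preprocess("a\u2009b\x01c") == "a bc"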
| 365 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_snake_case : List[str] = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source , target ):
return (abs(source - target ) / target) < 0.0_1
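# NOTE: illustrative spot checks for the 1% relative-tolerance helper above.
assert is_apercent_close(2_351_563, 2_355_000)  # ~0.15% apart -> close enough
assert not is_apercent_close(2_351_563, 2_500_000)  # ~6% apart -> not close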
@pytest.mark.integration
def test_test_command( dataset_dir ):
    args = _TestCommandArgs(dataset=dataset_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    datasets_readme_path = os.path.join(dataset_dir , "README.md" )
    assert os.path.exists(datasets_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir )
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_3_5_1_5_6_3,
"num_examples": 1_0_0_0_0,
},
{
"name": "validation",
"num_bytes": 2_3_8_4_1_8,
"num_examples": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos["default"] , key ), getattr(expected_dataset_infos["default"] , key )
if key == "num_bytes":
            assert is_apercent_close(result , expected )
elif key == "splits":
            assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 134 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__(self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : int=3_2 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : str=1_0 , __UpperCAmelCase : Dict=[1_0, 2_0, 3_0, 4_0] , __UpperCAmelCase : List[Any]=[1, 1, 2, 1] , __UpperCAmelCase : int=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[int]="relu" , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Any=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embeddings_size
UpperCAmelCase__ = hidden_sizes
UpperCAmelCase__ = depths
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = scope
UpperCAmelCase__ = len(__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def lowercase_ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = RegNetModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(__UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowercase_ (self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = RegNetForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : Dict = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Any = False
def lowercase_ (self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = RegNetModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def lowercase_ (self : Optional[int] ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ (self : List[Any] ) -> Dict:
"""simple docstring"""
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowercase_ (self : Tuple ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowercase_ (self : List[Any] ) -> int:
"""simple docstring"""
pass
def lowercase_ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__UpperCAmelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowercase_ (self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase_ (self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=__UpperCAmelCase )
for name, module in model.named_modules():
                if isinstance(__UpperCAmelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowercase_ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(__UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ):
UpperCAmelCase__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
UpperCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase__ = layer_type
UpperCAmelCase__ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def lowercase_ (self : int ) -> Dict:
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = RegNetModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowerCAmelCase_ ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowercase_ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase_ (self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCAmelCase )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__UpperCAmelCase )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
UpperCAmelCase__ = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 65 |
from maths.prime_factors import prime_factors
def liouville_lambda( number : int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        a :Dict = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(a )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
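# NOTE: illustrative, self-contained check of liouville_lambda's behaviour, using a
# hypothetical trial-division stand-in for the imported `prime_factors` helper.
def _prime_factors(n: int) -> list:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

assert (-1 if len(_prime_factors(12)) % 2 else 1) == -1  # 12 = 2 * 2 * 3 -> odd count
assert (-1 if len(_prime_factors(10)) % 2 else 1) == 1   # 10 = 2 * 5 -> even count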
| 94 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
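# NOTE: illustrative, CPU-only sketch of the slice assertions above: flatten a small
# patch and require the maximum element-wise deviation from a reference to stay
# within the tolerance. The values here are made up for illustration.
import numpy as np

_slice = np.array([[0.30, 0.31], [0.32, 0.37]])
_expected = np.array([0.301, 0.309, 0.321, 0.372])
assert np.abs(_slice.flatten() - _expected).max() < 1E-1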
| 365 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
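# NOTE: generic, hedged sketch of the lazy-import pattern above (assuming nothing
# about transformers' internal _LazyModule API): attribute access triggers the real
# import on first use and caches the result on the module object.
import importlib
import types

class _LazyNamespace(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value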
| 163 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
snake_case_ :List[Any] = 1024
snake_case_ :int = 4096
snake_case_ :int = 24
snake_case_ :Tuple = 16
snake_case_ :Any = [5, 11, 17, 23]
snake_case_ :List[Any] = [256, 512, 1024, 1024]
snake_case_ :str = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
snake_case_ :List[str] = 768
snake_case_ :Any = [1, 1, 1, 0.5]
snake_case_ :Optional[Any] = [256, 512, 768, 768]
snake_case_ :Optional[Any] = 150
snake_case_ :List[str] = 16
snake_case_ :Optional[Any] = (1, 384, 384)
snake_case_ :Tuple = False
snake_case_ :List[Any] = """project"""
if "ade" in checkpoint_url:
snake_case_ :Dict = True
snake_case_ :Optional[int] = 768
snake_case_ :int = [1, 1, 1, 0.5]
snake_case_ :Any = 150
snake_case_ :Optional[Any] = 16
snake_case_ :List[Any] = """huggingface/label-files"""
snake_case_ :Any = """ade20k-id2label.json"""
snake_case_ :Optional[Any] = json.load(open(cached_download(hf_hub_url(_lowercase, _lowercase, repo_type="""dataset""" ) ), """r""" ) )
snake_case_ :Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
snake_case_ :Union[str, Any] = idalabel
snake_case_ :str = {v: k for k, v in idalabel.items()}
snake_case_ :List[str] = [1, 150, 480, 480]
return config, expected_shape
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Optional[int] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowercase, _lowercase )
def A_ ( _lowercase ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
snake_case_ :str = name.replace("""pretrained.model""", """dpt.encoder""" )
if "pretrained.model" in name:
snake_case_ :Optional[Any] = name.replace("""pretrained.model""", """dpt.embeddings""" )
if "patch_embed" in name:
snake_case_ :List[str] = name.replace("""patch_embed""", """""" )
if "pos_embed" in name:
snake_case_ :int = name.replace("""pos_embed""", """position_embeddings""" )
if "attn.proj" in name:
snake_case_ :Union[str, Any] = name.replace("""attn.proj""", """attention.output.dense""" )
if "proj" in name and "project" not in name:
snake_case_ :str = name.replace("""proj""", """projection""" )
if "blocks" in name:
snake_case_ :Dict = name.replace("""blocks""", """layer""" )
if "mlp.fc1" in name:
snake_case_ :int = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case_ :int = name.replace("""mlp.fc2""", """output.dense""" )
if "norm1" in name and "backbone" not in name:
snake_case_ :Optional[int] = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
snake_case_ :str = name.replace("""norm2""", """layernorm_after""" )
if "scratch.output_conv" in name:
snake_case_ :List[str] = name.replace("""scratch.output_conv""", """head""" )
if "scratch" in name:
snake_case_ :int = name.replace("""scratch""", """neck""" )
if "layer1_rn" in name:
snake_case_ :Tuple = name.replace("""layer1_rn""", """convs.0""" )
if "layer2_rn" in name:
snake_case_ :List[str] = name.replace("""layer2_rn""", """convs.1""" )
if "layer3_rn" in name:
snake_case_ :Tuple = name.replace("""layer3_rn""", """convs.2""" )
if "layer4_rn" in name:
snake_case_ :Optional[int] = name.replace("""layer4_rn""", """convs.3""" )
if "refinenet" in name:
snake_case_ :Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
snake_case_ :Optional[Any] = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
snake_case_ :str = name.replace("""out_conv""", """projection""" )
if "resConfUnit1" in name:
snake_case_ :Union[str, Any] = name.replace("""resConfUnit1""", """residual_layer1""" )
if "resConfUnit2" in name:
snake_case_ :int = name.replace("""resConfUnit2""", """residual_layer2""" )
if "conv1" in name:
snake_case_ :int = name.replace("""conv1""", """convolution1""" )
if "conv2" in name:
snake_case_ :str = name.replace("""conv2""", """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
snake_case_ :Optional[Any] = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
snake_case_ :List[str] = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
snake_case_ :Optional[int] = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
snake_case_ :int = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
snake_case_ :Optional[Any] = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
snake_case_ :Optional[Any] = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
snake_case_ :int = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
snake_case_ :Optional[int] = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
snake_case_ :List[str] = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
snake_case_ :Tuple = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
snake_case_ :str = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
snake_case_ :List[str] = name.replace("""pretrained""", """dpt""" )
if "bn" in name:
snake_case_ :Optional[int] = name.replace("""bn""", """batch_norm""" )
if "head" in name:
snake_case_ :Dict = name.replace("""head""", """head.head""" )
if "encoder.norm" in name:
snake_case_ :Optional[int] = name.replace("""encoder.norm""", """layernorm""" )
if "auxlayer" in name:
snake_case_ :List[str] = name.replace("""auxlayer""", """auxiliary_head.head""" )
if "backbone" in name:
snake_case_ :List[str] = name.replace("""backbone""", """backbone.bit.encoder""" )
if ".." in name:
snake_case_ :str = name.replace("""..""", """.""" )
if "stem.conv" in name:
snake_case_ :Optional[Any] = name.replace("""stem.conv""", """bit.embedder.convolution""" )
if "blocks" in name:
snake_case_ :int = name.replace("""blocks""", """layers""" )
if "convolution" in name and "backbone" in name:
snake_case_ :Any = name.replace("""convolution""", """conv""" )
if "layer" in name and "backbone" in name:
snake_case_ :Optional[int] = name.replace("""layer""", """layers""" )
if "backbone.bit.encoder.bit" in name:
snake_case_ :Any = name.replace("""backbone.bit.encoder.bit""", """backbone.bit""" )
if "embedder.conv" in name:
snake_case_ :List[Any] = name.replace("""embedder.conv""", """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
snake_case_ :Any = name.replace("""backbone.bit.encoder.stem.norm""", """backbone.bit.embedder.norm""" )
return name
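# NOTE: spot check of the refinenet index remapping noted above
# (4 -> 0, 3 -> 1, 2 -> 2, 1 -> 3).
assert [abs(i - 4) for i in (4, 3, 2, 1)] == [0, 1, 2, 3]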
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ :str = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
snake_case_ :List[str] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ :List[Any] = in_proj_weight[: config.hidden_size, :]
snake_case_ :Union[str, Any] = in_proj_bias[: config.hidden_size]
snake_case_ :List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ :List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ :Tuple = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ :Optional[int] = in_proj_bias[-config.hidden_size :]
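# NOTE: illustrative sketch (toy hidden size, not part of the conversion) of the
# fused-qkv split above: the (3 * hidden_size, hidden_size) in-projection is sliced
# row-wise into the query, key and value weights.
import torch

_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden, :], _fused[_hidden : 2 * _hidden, :], _fused[-_hidden:, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)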
def A_ ( ):
'''simple docstring'''
snake_case_ :Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ :List[Any] = Image.open(requests.get(_lowercase, stream=_lowercase ).raw )
return im
@torch.no_grad()
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_, snake_case_ :int = get_dpt_config(_lowercase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
snake_case_ :Any = torch.load(_lowercase, map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(_lowercase )
# rename keys
for key in state_dict.copy().keys():
snake_case_ :Any = state_dict.pop(_lowercase )
snake_case_ :int = val
# read in qkv matrices
read_in_q_k_v(_lowercase, _lowercase )
# load HuggingFace model
snake_case_ :Tuple = DPTForSemanticSegmentation(_lowercase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# Check outputs on an image
snake_case_ :List[str] = 480 if """ade""" in checkpoint_url else 384
snake_case_ :Any = DPTImageProcessor(size=_lowercase )
snake_case_ :Any = prepare_img()
snake_case_ :Tuple = image_processor(_lowercase, return_tensors="""pt""" )
# forward pass
snake_case_ :str = model(**_lowercase ).logits if """ade""" in checkpoint_url else model(**_lowercase ).predicted_depth
if show_prediction:
snake_case_ :Union[str, Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="""bicubic""", align_corners=_lowercase, )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
__a = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 66 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = os.path.join(args.tf_model_dir, """parameters.json""" )
snake_case_ :Any = json.loads(open(_lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
snake_case_ :Optional[int] = args.output + """.pt"""
snake_case_ :List[str] = OrderedDict()
with tf.device("""/CPU:0""" ):
snake_case_ :Dict = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ :str = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ :List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
snake_case_ :Any = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
snake_case_ :Optional[int] = 8
snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :List[str] = torch.tensor(_lowercase )
elif key_name.startswith("""model/moe""" ):
snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/softmlp/kernel""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
snake_case_ :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
snake_case_ :Dict = key_name[-9:-7]
for i in range(16 ):
snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
snake_case_ :Tuple = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/mlp""" ):
snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p1/bias""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
snake_case_ :str = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/bias""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
snake_case_ :Any = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/ln""" ):
snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :int = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.startswith("""model/att""" ):
snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ :Dict = state[:, 0, :, :]
snake_case_ :int = state[:, 1, :, :]
snake_case_ :List[str] = state[:, 2, :, :]
snake_case_ :str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
snake_case_ :int = torch.tensor(_lowercase )
snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
snake_case_ :Dict = torch.tensor(_lowercase )
snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/o/kernel""" ):
snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
snake_case_ :str = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = torch.tensor(_lowercase )
elif key_name.startswith("""model/an""" ):
snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player
snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
if key_name.startswith("""model/wte""" ):
snake_case_ :Tuple = """lm_head.weight"""
snake_case_ :List[str] = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
elif key_name.startswith("""model/wob""" ):
snake_case_ :str = """final_logits_bias"""
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = state.reshape((1, -1) )
snake_case_ :Union[str, Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
snake_case_ :str = """model.last_project.weight"""
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
snake_case_ :Optional[int] = """model.last_project.bias"""
snake_case_ :Tuple = vnp.copy() # same because it is one dimensional
snake_case_ :Any = torch.tensor(_lowercase )
torch.save(_lowercase, args.output )
if __name__ == "__main__":
__a = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
__a = parser.parse_args()
convert_tf_gptsan_to_pt(args)
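# NOTE: minimal sketch of the recurring transpose above, assuming TF dense kernels
# are stored as (in_features, out_features) while torch.nn.Linear weights are
# stored as (out_features, in_features).
import numpy as np
import torch

_kernel = np.arange(6, dtype=np.float32).reshape(2, 3)  # (in=2, out=3)
_weight = torch.tensor(_kernel.transpose([1, 0]).copy())  # (out=3, in=2)
assert _weight.shape == (3, 2)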
| 66 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=_a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ["torch", "transformers", "onnx"]
def __init__( self : int , *snake_case : List[str] , **snake_case : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : str , *snake_case : int , **snake_case : Optional[Any] ) -> Tuple:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Tuple , *snake_case : Optional[int] , **snake_case : str ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ["torch", "transformers", "onnx"]
def __init__( self : int , *snake_case : List[Any] , **snake_case : Any ) -> Union[str, Any]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : str , *snake_case : List[Any] , **snake_case : str ) -> Tuple:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : List[Any] , *snake_case : List[str] , **snake_case : int ) -> Tuple:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ["torch", "transformers", "onnx"]
def __init__( self : Any , *snake_case : List[str] , **snake_case : List[Any] ) -> List[Any]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , *snake_case : Optional[int] , **snake_case : Tuple ) -> List[str]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Optional[Any] , *snake_case : str , **snake_case : Dict ) -> List[str]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ["torch", "transformers", "onnx"]
def __init__( self : Tuple , *snake_case : Dict , **snake_case : Dict ) -> Optional[int]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Dict , *snake_case : str , **snake_case : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Any , *snake_case : Optional[Any] , **snake_case : Optional[int] ) -> List[str]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ["torch", "transformers", "onnx"]
def __init__( self : List[str] , *snake_case : int , **snake_case : str ) -> Any:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , *snake_case : List[str] , **snake_case : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Dict , *snake_case : Tuple , **snake_case : int ) -> List[str]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = ["torch", "transformers", "onnx"]
def __init__( self : List[str] , *snake_case : Union[str, Any] , **snake_case : str ) -> Optional[Any]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Any , *snake_case : int , **snake_case : Dict ) -> Optional[int]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase__ ( cls : Dict , *snake_case : Dict , **snake_case : Dict ) -> Tuple:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
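# NOTE: hedged, generic sketch of what the dummy objects above accomplish (assuming
# nothing about transformers' real `requires_backends`): using a class without the
# needed backends fails with a clear ImportError naming what is missing.
_AVAILABLE_BACKENDS = {"numpy"}  # illustrative

def _requires_backends(obj, backends):
    missing = [b for b in backends if b not in _AVAILABLE_BACKENDS]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")

try:
    _requires_backends(dict, ["torch", "onnx"])
except ImportError as err:
    assert "torch" in str(err)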
| 240 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase :Union[str, Any] = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :Dict = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :Dict = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__UpperCAmelCase :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 240 | 1 |
"""simple docstring"""
def greatest_common_divisor( a : int , b : int ) -> int:
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative( x : int , y : int ) -> int:
    while y:  # when y becomes 0 the loop terminates and x holds the final GCD
        x , y = y , x % y
    return abs(x )
def main():
    try:
        nums = input('Enter two integers separated by comma (,): ' ).split(',' )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_b}) = '''
            f'''{greatest_common_divisor(num_a , num_b )}''' )
        print(f'''By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input' )
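# NOTE: spot checks for the two implementations above.
assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8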
if __name__ == "__main__":
main()
| 98 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase ( __UpperCAmelCase):
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''num_heads''' ) )
class lowercase :
def __init__( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=13 , _lowerCamelCase : List[str]=64 , _lowerCamelCase : int=3 , _lowerCamelCase : int=[16, 48, 96] , _lowerCamelCase : Dict=[1, 3, 6] , _lowerCamelCase : List[Any]=[1, 2, 10] , _lowerCamelCase : Optional[int]=[7, 3, 3] , _lowerCamelCase : Optional[int]=[4, 2, 2] , _lowerCamelCase : Union[str, Any]=[2, 1, 1] , _lowerCamelCase : str=[2, 2, 2] , _lowerCamelCase : Tuple=[False, False, True] , _lowerCamelCase : Union[str, Any]=[0.0, 0.0, 0.0] , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : Dict=1E-12 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : List[Any]=2 , ):
"""simple docstring"""
A_ : Tuple = parent
A_ : Dict = batch_size
A_ : str = image_size
A_ : Dict = patch_sizes
A_ : Optional[int] = patch_stride
A_ : Optional[int] = patch_padding
A_ : Optional[Any] = is_training
A_ : Union[str, Any] = use_labels
A_ : str = num_labels
A_ : Optional[int] = num_channels
A_ : str = embed_dim
A_ : Tuple = num_heads
A_ : List[Any] = stride_kv
A_ : str = depth
A_ : Dict = cls_token
A_ : Optional[Any] = attention_drop_rate
A_ : str = initializer_range
A_ : Tuple = layer_norm_eps
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Tuple = self.get_config()
return config, pixel_values, labels
def a_ ( self : Any ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def a_ ( self : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : str = TFCvtModel(config=_lowerCamelCase )
A_ : Any = model(_lowerCamelCase , training=_lowerCamelCase )
A_ : int = (self.image_size, self.image_size)
A_ , A_ : Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
A_ : List[str] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
A_ : int = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def a_ ( self : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Any = self.num_labels
A_ : str = TFCvtForImageClassification(_lowerCamelCase )
A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Tuple = config_and_inputs
A_ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase):
__lowerCAmelCase : str = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__lowerCAmelCase : Tuple = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
__lowerCAmelCase : int = False
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : List[str] = False
def a_ ( self : int ):
"""simple docstring"""
A_ : Dict = TFCvtModelTester(self )
A_ : int = TFCvtConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def a_ ( self : Any ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def a_ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def a_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def a_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def a_ ( self : Tuple ):
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def a_ ( self : Dict ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def a_ ( self : int ):
"""simple docstring"""
A_ : List[Any] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_lowerCamelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def a_ ( self : str ):
"""simple docstring"""
A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Dict = [*signature.parameters.keys()]
A_ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : Optional[int] = outputs.hidden_states
A_ : Union[str, Any] = len(self.model_tester.depth )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Dict = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def a_ ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFCvtModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def lowercase_ ( ):
"""simple docstring"""
A_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase):
@cached_property
def a_ ( self : List[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def a_ ( self : Any ):
"""simple docstring"""
A_ : Optional[int] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : str = prepare_img()
A_ : Optional[int] = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# forward pass
A_ : Dict = model(**_lowerCamelCase )
# verify the logits
A_ : Union[str, Any] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1E-4 ) )
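# NOTE: illustrative check (outside the test classes) of the output-size formula
# used in create_and_check_model above, with the first-stage tester defaults
# (size 64, padding 2, kernel 7, stride 4): floor((64 + 2*2 - 7) / 4 + 1) = 16.
from math import floor

assert floor(((64 + 2 * 2 - 7) / 4) + 1) == 16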
| 167 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime( number : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(a__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
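# NOTE: small sanity check of the 6k +/- 1 stepping above: every prime greater
# than 3 is congruent to 1 or 5 modulo 6.
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 97))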
def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( n : int = 2_00_00_00 ) -> int:
    '''simple docstring'''
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
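# NOTE: quick sanity check of the takewhile pattern above: primes below 10
# sum to 2 + 3 + 5 + 7 = 17.
assert solution(10) == 17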
if __name__ == "__main__":
print(F'''{solution() = }''')
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
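
# Editor's illustration (assumption, not part of the original file): with the lazy
# module installed in sys.modules, importing the package stays cheap and the heavy
# torch-backed submodule is only imported on first attribute access, e.g.:
#
#   from transformers.models.bigbird_pegasus import BigBirdPegasusConfig
#   config = BigBirdPegasusConfig()  # the modeling file is still not imported here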
| 89 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve: lazily yield every prime, tracking for each upcoming
    composite one of its prime factors instead of using a fixed-size array."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its factor forward to the next
            # multiple that is not yet claimed.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: its first relevant multiple is prime**2.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as the remainder will be 2.
        next(primes)
        n += 2
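

# Editor's spot-check (not part of the original file): the incremental sieve
# yields the primes in order.
from itertools import islice

assert list(islice(sieve(), 6)) == [2, 3, 5, 7, 11, 13]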
if __name__ == "__main__":
print(solution())
| 250 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 69 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ctrl'] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_ctrl'] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 367 |
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clap'] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    # Generates a pair of dummy DataLoaders over noisy samples of y = a*x + b.
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    # Trains for `num_epochs`, returning the random numbers drawn so that
    # resumed runs can be compared against a baseline run.
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands


class DummyModel(nn.Module):
    # Simple model computing y = a*x + b with two learnable scalars.
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break

    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 1 |
import requests
lowerCAmelCase__ : Union[str, Any] ='''''' # <-- Put your OpenWeatherMap appid here!
lowerCAmelCase__ : Tuple ='''https://api.openweathermap.org/data/2.5/'''
def __lowercase ( a__ = "Chicago" , a__ = APPID ) -> dict:
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def __lowercase ( a__ = "Kolkata, India" , a__ = APPID ) -> dict:
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def __lowercase ( a__ = 55.68 , a__ = 12.57 , a__ = APPID ) -> dict:
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
lowerCAmelCase__ : int =input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 118 |
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c over Pythagorean triplets with
    a + b + c == n, or -1 if no such triplet exists."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n,
        # eliminating c: c = n - a - b, so a**2 + b**2 = (n - a - b)**2,
        # which rearranges to b = (n**2 - 2*a*n) / (2*n - 2*a).
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 118 | 1 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for the two input bits."""
    backend = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value (the sum bit)
    qc_ha.measure(3, 1)  # extract AND value (the carry bit)

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, backend, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f"Half Adder Output Qubit Counts: {counts}")
| 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use DeformableDetrImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 160 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    # NOR outputs 1 only when both inputs are 0.
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("""Truth Table of NOR Gate:""")
    print("""| Input 1 | Input 2 | Output |""")
    print(F'''|    0    |    0    |   {nor_gate(0, 0)}    |''')
    print(F'''|    0    |    1    |   {nor_gate(0, 1)}    |''')
    print(F'''|    1    |    0    |   {nor_gate(1, 0)}    |''')
    print(F'''|    1    |    1    |   {nor_gate(1, 1)}    |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = """luke"""

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
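

# Usage sketch (editor's illustration, not part of the original file):
#
#   config = LukeConfig(entity_emb_size=128)    # override any field at construction
#   config.save_pretrained("./my-luke-config")  # writes config.json for reloading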
| 223 | 0 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
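

# Editor's note (illustrative): weight norm re-parameterises each conv weight as a
# magnitude (weight_g) and a direction (weight_v). It is applied above so the two
# checkpoint tensors can be copied one-to-one, then removed to fuse them back into
# a single weight for inference.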
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , ) ->Tuple:
"""simple docstring"""
if config_path is not None:
a_ = SpeechTaHifiGanConfig.from_pretrained(UpperCAmelCase )
else:
a_ = SpeechTaHifiGanConfig()
a_ = SpeechTaHifiGan(UpperCAmelCase )
a_ = torch.load(UpperCAmelCase )
load_weights(orig_checkpoint["model"]["generator"] , UpperCAmelCase , UpperCAmelCase )
a_ = np.load(UpperCAmelCase )
a_ = stats[0].reshape(-1 )
a_ = stats[1].reshape(-1 )
a_ = torch.from_numpy(UpperCAmelCase ).float()
a_ = torch.from_numpy(UpperCAmelCase ).float()
model.save_pretrained(UpperCAmelCase )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 243 |
"""simple docstring"""
from __future__ import annotations
class snake_case :
def __init__( self , __UpperCAmelCase) ->Any:
a_ = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float.")
if len(__UpperCAmelCase) != 0:
a_ = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(__UpperCAmelCase) != cols:
raise error
for value in row:
if not isinstance(__UpperCAmelCase , (int, float)):
raise error
a_ = rows
else:
a_ = []
def UpperCAmelCase__ ( self) ->list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def UpperCAmelCase__ ( self) ->int:
return len(self.rows)
@property
def UpperCAmelCase__ ( self) ->int:
return len(self.rows[0])
@property
def UpperCAmelCase__ ( self) ->tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def UpperCAmelCase__ ( self) ->bool:
return self.order[0] == self.order[1]
def UpperCAmelCase__ ( self) ->Matrix:
a_ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def UpperCAmelCase__ ( self) ->bool:
return bool(self.determinant())
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(__UpperCAmelCase).determinant()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->int:
if (row + column) % 2 == 0:
return self.get_minor(__UpperCAmelCase , __UpperCAmelCase)
return -1 * self.get_minor(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Matrix:
return Matrix(
[
[self.get_minor(__UpperCAmelCase , __UpperCAmelCase) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def UpperCAmelCase__ ( self) ->Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def UpperCAmelCase__ ( self) ->Matrix:
a_ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Matrix:
a_ = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse")
return self.adjugate() * (1 / determinant)
def __repr__( self) ->str:
return str(self.rows)
def __str__( self) ->str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__UpperCAmelCase) for value in row]) + ".]"
for row in self.rows
])
+ "]"
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->None:
a_ = TypeError("Row must be a list containing all ints and/or floats")
if not isinstance(__UpperCAmelCase , __UpperCAmelCase):
raise type_error
for value in row:
if not isinstance(__UpperCAmelCase , (int, float)):
raise type_error
if len(__UpperCAmelCase) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix")
if position is None:
self.rows.append(__UpperCAmelCase)
else:
a_ = self.rows[0:position] + [row] + self.rows[position:]
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->None:
a_ = TypeError(
"Column must be a list containing all ints and/or floats")
if not isinstance(__UpperCAmelCase , __UpperCAmelCase):
raise type_error
for value in column:
if not isinstance(__UpperCAmelCase , (int, float)):
raise type_error
if len(__UpperCAmelCase) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix")
if position is None:
a_ = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
a_ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self , __UpperCAmelCase) ->bool:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase):
return NotImplemented
return self.rows == other.rows
def __ne__( self , __UpperCAmelCase) ->bool:
return not self == other
def __neg__( self) ->Matrix:
return self * -1
def __add__( self , __UpperCAmelCase) ->Matrix:
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order")
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self , __UpperCAmelCase) ->Matrix:
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order")
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self , __UpperCAmelCase) ->Matrix:
if isinstance(__UpperCAmelCase , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(__UpperCAmelCase , __UpperCAmelCase):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second")
return Matrix(
[
[Matrix.dot_product(__UpperCAmelCase , __UpperCAmelCase) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix")
def __pow__( self , __UpperCAmelCase) ->Matrix:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase):
raise TypeError("A Matrix can only be raised to the power of an int")
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power")
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power")
a_ = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def UpperCAmelCase__ ( cls , __UpperCAmelCase , __UpperCAmelCase) ->int:
return sum(row[i] * column[i] for i in range(len(__UpperCAmelCase)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 243 | 1 |
def greatest_common_divisor(a: int, b: int) -> int:
    # Recursive Euclidean algorithm.
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    # Iterative Euclidean algorithm.
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'''greatest_common_divisor({num_1}, {num_2}) = '''
            f'''{greatest_common_divisor(num_1, num_2)}'''
        )
        print(f'''By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}''')
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
| 351 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _A ( __magic_name__=32 , __magic_name__=10 , __magic_name__=100 , __magic_name__=1026 , __magic_name__=True , __magic_name__="data/tokenized_stories_train_wikitext103.jbl" , __magic_name__="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
lowercase__ , lowercase__ = generate_datasets(
__magic_name__ , __magic_name__ , number=__magic_name__ , min_len=1026 , trim=__magic_name__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
lowercase__ = load_gpta("gpt2" ).to(__magic_name__ )
print("computing perplexity on objective set" )
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ ).item()
print("perplexity on objective set:" , __magic_name__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _A ( __magic_name__ , __magic_name__=15 , __magic_name__=128 , __magic_name__=100 , __magic_name__="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
lowercase__ = SecondaryLearner(__magic_name__ )
# Train secondary learner
lowercase__ = train_secondary_learner(
__magic_name__ , __magic_name__ , max_epochs=__magic_name__ , batch_size=__magic_name__ , eval_freq=100 , igf_model_path=__magic_name__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=32 , __magic_name__=1000 , __magic_name__=16 , __magic_name__=1.0 , __magic_name__=recopy_gpta , __magic_name__=None , __magic_name__=10 , __magic_name__="gpt2_finetuned.pt" , ):
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
lowercase__ = RandomSampler(__magic_name__ )
lowercase__ = DataLoader(__magic_name__ , sampler=__magic_name__ )
lowercase__ = max_steps // (len(__magic_name__ )) + 1
lowercase__ = 0
lowercase__ = torch.zeros((1, context_len) , dtype=torch.long , device=__magic_name__ )
lowercase__ , lowercase__ , lowercase__ = recopy_model(__magic_name__ , __magic_name__ , __magic_name__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(__magic_name__ )
secondary_learner.eval()
lowercase__ = []
lowercase__ = 0
lowercase__ = []
lowercase__ = []
# Compute the performance of the transformer model at the beginning
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
for epoch in range(int(__magic_name__ ) ):
for step, example in enumerate(__magic_name__ ):
torch.cuda.empty_cache()
lowercase__ = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase__ = model(__magic_name__ , labels=__magic_name__ )
lowercase__ = True
if secondary_learner is not None:
lowercase__ = secondary_learner.forward(
torch.tensor(__magic_name__ , dtype=torch.long , device=__magic_name__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__magic_name__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowercase__ = -1
if predicted_q < threshold:
lowercase__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __magic_name__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _A ( ):
lowercase__ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=__magic_name__ , default=__magic_name__ , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=__magic_name__ , default=__magic_name__ , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=__magic_name__ , type=__magic_name__ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=__magic_name__ , default=__magic_name__ , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=__magic_name__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=__magic_name__ , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=__magic_name__ , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=__magic_name__ , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=__magic_name__ , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=__magic_name__ , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=__magic_name__ , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=__magic_name__ , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=__magic_name__ , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=__magic_name__ , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=__magic_name__ , type=__magic_name__ , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=__magic_name__ , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__magic_name__ , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=__magic_name__ , type=__magic_name__ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__magic_name__ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
lowercase__ = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
lowercase__ = training_secondary_learner(
__magic_name__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase__ , lowercase__ = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=__magic_name__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__magic_name__ , __magic_name__ , __magic_name__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__magic_name__ , secondary_learner=__magic_name__ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 201 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A self-contained iterable dataset of random length, to test sharding of
    # streams whose size is not known in advance.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
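

# Editor's note (illustrative): in the tests below, BatchSamplerShard splits one
# batch sampler across num_processes=2 shards, giving shard i batches i, i + 2,
# i + 4, ... and padding or truncating the tail depending on even_batches/drop_last.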
class BatchSamplerShardTest(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Compare shard lengths first, then their contents.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
_UpperCamelCase : Any = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : int = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase : List[str] = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : List[Any] = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase : Any = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : str = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCamelCase : List[Any] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
# Check the shards when the dataset is very small.
_UpperCamelCase : int = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : Any = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
_UpperCamelCase : int = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase : Optional[int] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ )
_UpperCamelCase : Any = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase : List[str] = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ )
_UpperCamelCase : List[Any] = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ )
# Check the shards when the dataset is very small.
_UpperCamelCase : Optional[Any] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : int = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ )
_UpperCamelCase : str = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Dict = [[], []]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
_UpperCamelCase : List[Any] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase : List[str] = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : Tuple = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase : Union[str, Any] = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : str = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCamelCase : List[str] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : str = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
# Check the shards when the dataset is very small.
_UpperCamelCase : str = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : List[str] = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = [[], []]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,even_batches=lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
_UpperCamelCase : List[Any] = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : Any = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ ,even_batches=lowerCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase : int = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : Tuple = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ ,even_batches=lowerCamelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase : Tuple = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ ,even_batches=lowerCamelCase__ )
# Check the shards when the dataset is very small.
_UpperCamelCase : List[Any] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : str = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ ,even_batches=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : Tuple = [[], []]
self.check_batch_sampler_shards(lowerCamelCase__ ,lowerCamelCase__ ,split_batches=lowerCamelCase__ ,even_batches=lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Any = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_UpperCamelCase : Optional[Any] = [BatchSamplerShard(lowerCamelCase__ ,2 ,lowerCamelCase__ ,even_batches=lowerCamelCase__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) ,3 )
self.assertEqual(len(batch_sampler_shards[1] ) ,2 )
self.assertListEqual(list(batch_sampler_shards[0] ) ,[[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) ,[[3, 4], [9, 10, 11]] )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : str=False ,lowerCamelCase__ : List[Any]=2 ,lowerCamelCase__ : Optional[int]=False ):
'''simple docstring'''
random.seed(lowerCamelCase__ )
_UpperCamelCase : str = list(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = [
IterableDatasetShard(
lowerCamelCase__ ,batch_size=lowerCamelCase__ ,drop_last=lowerCamelCase__ ,num_processes=lowerCamelCase__ ,process_index=lowerCamelCase__ ,split_batches=lowerCamelCase__ ,)
for i in range(lowerCamelCase__ )
]
_UpperCamelCase : Any = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCamelCase__ )
iterable_dataset_lists.append(list(lowerCamelCase__ ) )
_UpperCamelCase : Any = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_UpperCamelCase : List[Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) )
self.assertTrue(len(lowerCamelCase__ ) % shard_batch_size == 0 )
_UpperCamelCase : str = []
for idx in range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCamelCase__ ) < len(lowerCamelCase__ ):
reference += reference
self.assertListEqual(lowerCamelCase__ ,reference[: len(lowerCamelCase__ )] )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Any = 42
_UpperCamelCase : Union[str, Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCamelCase__ ,lowerCamelCase__ ,batch_size=4 ,drop_last=lowerCamelCase__ ,split_batches=lowerCamelCase__ )
self.check_iterable_dataset_shards(lowerCamelCase__ ,lowerCamelCase__ ,batch_size=4 ,drop_last=lowerCamelCase__ ,split_batches=lowerCamelCase__ )
self.check_iterable_dataset_shards(lowerCamelCase__ ,lowerCamelCase__ ,batch_size=4 ,drop_last=lowerCamelCase__ ,split_batches=lowerCamelCase__ )
self.check_iterable_dataset_shards(lowerCamelCase__ ,lowerCamelCase__ ,batch_size=4 ,drop_last=lowerCamelCase__ ,split_batches=lowerCamelCase__ )
# Edge case with a very small dataset
_UpperCamelCase : List[Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCamelCase__ ,lowerCamelCase__ ,batch_size=4 ,drop_last=lowerCamelCase__ ,split_batches=lowerCamelCase__ )
self.check_iterable_dataset_shards(lowerCamelCase__ ,lowerCamelCase__ ,batch_size=4 ,drop_last=lowerCamelCase__ ,split_batches=lowerCamelCase__ )
self.check_iterable_dataset_shards(lowerCamelCase__ ,lowerCamelCase__ ,batch_size=4 ,drop_last=lowerCamelCase__ ,split_batches=lowerCamelCase__ )
self.check_iterable_dataset_shards(lowerCamelCase__ ,lowerCamelCase__ ,batch_size=4 ,drop_last=lowerCamelCase__ ,split_batches=lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = BatchSampler(range(16 ) ,batch_size=4 ,drop_last=lowerCamelCase__ )
_UpperCamelCase : List[Any] = SkipBatchSampler(lowerCamelCase__ ,2 )
self.assertListEqual(list(lowerCamelCase__ ) ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = SkipDataLoader(list(range(16 ) ) ,batch_size=4 ,skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = DataLoader(list(range(16 ) ) ,batch_size=4 )
_UpperCamelCase : Optional[int] = skip_first_batches(lowerCamelCase__ ,num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = DataLoaderShard(list(range(16 ) ) ,batch_size=4 )
for idx, _ in enumerate(lowerCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
Accelerator()
_UpperCamelCase : List[Any] = DataLoaderDispatcher(range(16 ) ,batch_size=4 )
for idx, _ in enumerate(lowerCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase__ ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
| 83 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
_A = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/accelerate""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
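# A minimal pure-function sketch of the staleness policy above (the label
# exemptions are omitted here): close 7+ days after the bot's stale comment,
# otherwise mark stale after 23+ days of inactivity, and only touch issues
# that are at least 30 days old.
from datetime import timedelta

def staleness_action(updated_at, created_at, bot_commented_last: bool) -> str:
    now = dt.utcnow()
    days_since_updated = (now - updated_at).days
    days_since_creation = (now - created_at).days
    if bot_commented_last and days_since_updated > 7 and days_since_creation >= 30:
        return "close"
    if days_since_updated > 23 and days_since_creation >= 30:
        return "mark_stale"
    return "keep"

assert staleness_action(dt.utcnow() - timedelta(days=40), dt.utcnow() - timedelta(days=60), False) == "mark_stale"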
| 171 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__A : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__):
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None )-> Union[str, Any]:
super().__init__()
lowerCamelCase_ =learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
lowerCamelCase_ =torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ =None
lowerCamelCase_ =torch.nn.Parameter(_SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:VQModel
_UpperCamelCase:CLIPTextModel
_UpperCamelCase:CLIPTokenizer
_UpperCamelCase:TransformeraDModel
_UpperCamelCase:LearnedClassifierFreeSamplingEmbeddings
_UpperCamelCase:VQDiffusionScheduler
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> List[str]:
super().__init__()
self.register_modules(
vqvae=_SCREAMING_SNAKE_CASE , transformer=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=_SCREAMING_SNAKE_CASE , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Any:
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
lowerCamelCase_ =self.tokenizer(
_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowerCamelCase_ =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
lowerCamelCase_ =text_input_ids[:, : self.tokenizer.model_max_length]
lowerCamelCase_ =self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowerCamelCase_ =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_SCREAMING_SNAKE_CASE )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ =prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowerCamelCase_ =self.learned_classifier_free_sampling_embeddings.embeddings
lowerCamelCase_ =negative_prompt_embeds.unsqueeze(0 ).repeat(_SCREAMING_SNAKE_CASE , 1 , 1 )
else:
lowerCamelCase_ =[""""""] * batch_size
lowerCamelCase_ =text_input_ids.shape[-1]
lowerCamelCase_ =self.tokenizer(
_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
lowerCamelCase_ =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
lowerCamelCase_ =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ =negative_prompt_embeds.shape[1]
lowerCamelCase_ =negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
lowerCamelCase_ =negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ =torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 5.0 , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , )-> Union[ImagePipelineOutput, Tuple]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}' )
lowerCamelCase_ =batch_size * num_images_per_prompt
lowerCamelCase_ =guidance_scale > 1.0
lowerCamelCase_ =self._encode_prompt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_SCREAMING_SNAKE_CASE )}.' )
# get the initial completely masked latents unless the user supplied it
lowerCamelCase_ =(batch_size, self.transformer.num_latent_pixels)
if latents is None:
lowerCamelCase_ =self.transformer.num_vector_embeds - 1
lowerCamelCase_ =torch.full(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
lowerCamelCase_ =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=self.device )
lowerCamelCase_ =self.scheduler.timesteps.to(self.device )
lowerCamelCase_ =latents
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the sample if we are doing classifier free guidance
lowerCamelCase_ =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
lowerCamelCase_ =self.transformer(_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE ).sample
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ =model_output.chunk(2 )
lowerCamelCase_ =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_SCREAMING_SNAKE_CASE , dim=1 , keepdim=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.truncate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# remove `log(0)`'s (`-inf`s)
lowerCamelCase_ =model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ =self.scheduler.step(_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.vqvae.config.vq_embed_dim
lowerCamelCase_ =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
lowerCamelCase_ =self.vqvae.quantize.get_codebook_entry(_SCREAMING_SNAKE_CASE , shape=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.vqvae.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE ).sample
lowerCamelCase_ =(image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ =self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> torch.FloatTensor:
lowerCamelCase_ , lowerCamelCase_ =torch.sort(_SCREAMING_SNAKE_CASE , 1 , descending=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.exp(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
lowerCamelCase_ =torch.full_like(keep_mask[:, 0:1, :] , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.cat((all_true, keep_mask) , dim=1 )
lowerCamelCase_ =keep_mask[:, :-1, :]
lowerCamelCase_ =keep_mask.gather(1 , indices.argsort(1 ) )
lowerCamelCase_ =log_p_x_0.clone()
lowerCamelCase_ =-torch.inf # -inf = log(0)
return rv
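# A minimal standalone sketch of the truncation above, written for
# log-probabilities of shape (batch, num_classes) instead of the pipeline's
# (batch, num_classes, num_pixels): keep the smallest set of classes whose
# cumulative probability reaches `truncation_rate` (always keeping the top
# class) and set the rest to log(0) = -inf.
def truncate_log_probs(log_p, truncation_rate):
    sorted_log_p, indices = torch.sort(log_p, 1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    keep = torch.cat((torch.ones_like(keep[:, :1]), keep[:, :-1]), dim=1)  # always keep the argmax
    keep = keep.gather(1, indices.argsort(1))  # undo the sort
    return log_p.masked_fill(~keep, -float("inf"))

# e.g. with probs [0.5, 0.3, 0.2] and truncation_rate=0.6, only the 0.2 class is dropped
_truncated = truncate_log_probs(torch.log(torch.tensor([[0.5, 0.3, 0.2]])), 0.6)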
| 49 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__A : Optional[Any] = logging.getLogger()
def _dump_articles(path: Path , articles: list ) -> None:
    """simple docstring"""
    content ="""\n""".join(articles )
    Path(path ).open("""w""" ).writelines(content )
__A : List[str] = 'patrickvonplaten/t5-tiny-random'
__A : List[Any] = 'sshleifer/bart-tiny-random'
__A : List[str] = 'sshleifer/tiny-mbart'
__A : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Dict:
lowerCamelCase_ =Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
lowerCamelCase_ =input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
lowerCamelCase_ =[""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
lowerCamelCase_ ="""translation_en_to_de""" if model == T5_TINY else """summarization"""
lowerCamelCase_ =f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(_SCREAMING_SNAKE_CASE , """argv""" , _SCREAMING_SNAKE_CASE ):
run_generate()
assert Path(_SCREAMING_SNAKE_CASE ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self )-> List[Any]:
self.run_eval_tester(_SCREAMING_SNAKE_CASE )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> int:
self.run_eval_tester(_SCREAMING_SNAKE_CASE )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
lowerCamelCase_ =Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
lowerCamelCase_ =input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
lowerCamelCase_ ={
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist groรartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
lowerCamelCase_ =Path(self.get_auto_remove_tmp_dir() )
lowerCamelCase_ =str(tmp_dir / """scores.json""" )
lowerCamelCase_ =str(tmp_dir / """val.target""" )
_dump_articles(_SCREAMING_SNAKE_CASE , text["""en"""] )
_dump_articles(_SCREAMING_SNAKE_CASE , text["""de"""] )
lowerCamelCase_ ="""translation_en_to_de""" if model == T5_TINY else """summarization"""
lowerCamelCase_ =f'\n run_eval_search.py\n {model}\n {str(_SCREAMING_SNAKE_CASE )}\n {str(_SCREAMING_SNAKE_CASE )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(_SCREAMING_SNAKE_CASE , """argv""" , _SCREAMING_SNAKE_CASE ):
with CaptureStdout() as cs:
run_search()
lowerCamelCase_ =[""" num_beams | length_penalty""", model, """Best score args"""]
lowerCamelCase_ =["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(_SCREAMING_SNAKE_CASE )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_SCREAMING_SNAKE_CASE ).exists()
os.remove(Path(_SCREAMING_SNAKE_CASE ) )
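# A small sketch of the search-string format passed above
# ("num_beams=1:2 length_penalty=0.9:1.0"): each key maps to colon-separated
# candidate values, and the search covers their cartesian product. This is an
# illustration of the format, not the actual parser in run_eval_search.
from itertools import product

def parse_search_string(search):
    keys, value_lists = [], []
    for group in search.split():
        key, values = group.split("=")
        keys.append(key)
        value_lists.append(values.split(":"))
    return [dict(zip(keys, combo)) for combo in product(*value_lists)]

assert len(parse_search_string("num_beams=1:2 length_penalty=0.9:1.0")) == 4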
| 49 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def convert_classification(base_model_name: str, hf_config, downstream_dict: dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name: str, hf_config, downstream_dict: dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name: str, hf_config, downstream_dict: dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name: str, config_path: str, checkpoint_path: str, model_dump_path: str):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False  # assumed s3prl-style extraction settings
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__a = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
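# Example invocation (the script file name and local paths are hypothetical;
# the flags match the parser defined above):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model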
| 145 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """simple docstring"""

    def __init__(self, id_) -> None:
        """simple docstring"""
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other) -> bool:
        """simple docstring"""
        return self.key < other.key

    def __repr__(self) -> str:
        """simple docstring"""
        return self.id

    def add_neighbor(self, vertex) -> None:
        """simple docstring"""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        """simple docstring"""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge) -> None:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
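# A short usage sketch of the API above: a weighted triangle graph whose
# minimum spanning tree keeps the two cheapest edges; `prim_heap` yields the
# same (vertex, parent) pairs lazily.
demo_graph = [Vertex(i) for i in range(3)]
connect(demo_graph, 1, 2, 1)  # edge between vertices 1 and 2 (1-based), weight 1
connect(demo_graph, 2, 3, 2)
connect(demo_graph, 1, 3, 4)
assert prim(demo_graph, demo_graph[0]) == [(2, 1), (3, 2)]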
| 145 | 1 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
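# A short follow-up sketch using the same API: a Hadamard gate puts the qubit
# in an equal superposition first, so the measured counts split roughly 50/50
# between "0" and "1" instead of always reading "0".
def superposition_measure(shots: int = 1_0_0_0) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    circuit = qiskit.QuantumCircuit(1 , 1 )
    circuit.h(0 )  # Hadamard: |0> -> (|0> + |1>) / sqrt(2)
    circuit.measure([0] , [0] )
    job = qiskit.execute(circuit , simulator , shots=shots )
    return job.result().get_counts(circuit )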
| 358 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'num_attention_heads' ) )
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=16 , __UpperCAmelCase=[128, 256, 384] , __UpperCAmelCase=[4, 6, 8] , __UpperCAmelCase=[2, 3, 4] , __UpperCAmelCase=[16, 16, 16] , __UpperCAmelCase=0 , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = num_channels
__UpperCamelCase = kernel_size
__UpperCamelCase = stride
__UpperCamelCase = padding
__UpperCamelCase = hidden_sizes
__UpperCamelCase = num_attention_heads
__UpperCamelCase = depths
__UpperCamelCase = key_dim
__UpperCamelCase = drop_path_rate
__UpperCamelCase = patch_size
__UpperCamelCase = attention_ratio
__UpperCamelCase = mlp_ratio
__UpperCamelCase = initializer_range
__UpperCamelCase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = num_labels
__UpperCamelCase = initializer_range
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = LevitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = (self.image_size, self.image_size)
__UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCamelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__UpperCamelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = LevitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = LevitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
__UpperCamelCase = (self.model_tester.image_size, self.model_tester.image_size)
__UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCamelCase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__UpperCamelCase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__UpperCamelCase = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
__UpperCamelCase = problem_type['title']
__UpperCamelCase = problem_type['num_labels']
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if problem_type["num_labels"] > 1:
__UpperCamelCase = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
__UpperCamelCase = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong in the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
__UpperCamelCase = model(**__UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = LevitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> Union[str, Any]:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
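# A small helper mirroring the resolution arithmetic repeated in the tests
# above: LeViT's patch embedding applies four strided convolutions, each
# mapping size -> floor((size + 2 * padding - kernel_size) / stride) + 1.
def final_feature_resolution(size, kernel_size=3, stride=2, padding=1, num_convs=4):
    for _ in range(num_convs):
        size = floor((size + 2 * padding - kernel_size) / stride) + 1
    return size

assert final_feature_resolution(64) == 4  # 64 -> 32 -> 16 -> 8 -> 4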
| 263 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
__UpperCamelCase =tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCamelCase =TextStreamer(A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCamelCase =cs.out[:-1]
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
__UpperCamelCase =tokenizer.decode(greedy_ids[0] )
__UpperCamelCase =TextIteratorStreamer(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__UpperCamelCase =Thread(target=model.generate , kwargs=A_ )
thread.start()
__UpperCamelCase =''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
__UpperCamelCase =greedy_ids[:, input_ids.shape[1] :]
__UpperCamelCase =tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCamelCase =TextStreamer(A_ , skip_prompt=A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCamelCase =cs.out[:-1]
self.assertEqual(A_ , A_ )
def _a ( self ) -> Any:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCamelCase =AutoTokenizer.from_pretrained('distilgpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =torch.ones((1, 5) , device=A_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCamelCase =TextStreamer(A_ , skip_special_tokens=A_ )
model.generate(A_ , max_new_tokens=1 , do_sample=A_ , streamer=A_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCamelCase =cs.out[:-1] # Remove the final "\n"
__UpperCamelCase =tokenizer(A_ , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self ) -> Tuple:
__UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase =TextIteratorStreamer(A_ , timeout=0.001 )
__UpperCamelCase ={'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__UpperCamelCase =Thread(target=model.generate , kwargs=A_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A_ ):
__UpperCamelCase =''
for new_text in streamer:
streamer_text += new_text
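# A minimal end-to-end sketch of the iterator streamer outside the test
# harness, reusing the tiny checkpoint from the tests above: generation runs
# in a background thread while the main thread consumes text as it arrives.
def stream_demo(input_ids):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
    model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'streamer': streamer}
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    for new_text in streamer:
        print(new_text, end='', flush=True)
    thread.join()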
| 62 |
import socket
def main():
    """simple docstring"""
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
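# A matching minimal server sketch for the client above (the file name served
# is an assumption): accept one connection, read the greeting, then stream the
# file in 1024-byte chunks.
def serve_file(filename: str = 'File_to_send') -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 1_23_12))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(10_24))  # b'Hello server!'
    with open(filename, 'rb') as in_file:
        while True:
            chunk = in_file.read(10_24)
            if not chunk:
                break
            conn.sendall(chunk)  # sendall retries partial sends
    conn.close()
    server.close()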
| 110 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCamelCase__:
def __init__( self : int , lowerCAmelCase : Collection[float] | None = None )-> None:
"""simple docstring"""
if components is None:
UpperCAmelCase = []
UpperCAmelCase = list(lowerCAmelCase )
def __len__( self : List[Any] )-> int:
"""simple docstring"""
return len(self.__components )
def __str__( self : int )-> str:
"""simple docstring"""
return "(" + ",".join(map(lowerCAmelCase , self.__components ) ) + ")"
def __add__( self : Tuple , lowerCAmelCase : Vector )-> Vector:
"""simple docstring"""
UpperCAmelCase = len(self )
if size == len(lowerCAmelCase ):
UpperCAmelCase = [self.__components[i] + other.component(lowerCAmelCase ) for i in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
else:
raise Exception('''must have the same size''' )
def __sub__( self : int , lowerCAmelCase : Vector )-> Vector:
"""simple docstring"""
UpperCAmelCase = len(self )
if size == len(lowerCAmelCase ):
UpperCAmelCase = [self.__components[i] - other.component(lowerCAmelCase ) for i in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__( self : int , lowerCAmelCase : float )-> Vector:
"""simple docstring"""
...
@overload
def __mul__( self : Tuple , lowerCAmelCase : Vector )-> float:
"""simple docstring"""
...
def __mul__( self : Optional[int] , lowerCAmelCase : float | Vector )-> float | Vector:
"""simple docstring"""
if isinstance(lowerCAmelCase , (float, int) ):
UpperCAmelCase = [c * other for c in self.__components]
return Vector(lowerCAmelCase )
elif isinstance(lowerCAmelCase , lowerCAmelCase ) and len(self ) == len(lowerCAmelCase ):
UpperCAmelCase = len(self )
UpperCAmelCase = [self.__components[i] * other.component(lowerCAmelCase ) for i in range(lowerCAmelCase )]
return sum(lowerCAmelCase )
else: # error case
raise Exception('''invalid operand!''' )
def a__( self : Union[str, Any] )-> Vector:
"""simple docstring"""
return Vector(self.__components )
def a__( self : List[Any] , lowerCAmelCase : int )-> float:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def a__( self : Any , lowerCAmelCase : int , lowerCAmelCase : float )-> None:
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
UpperCAmelCase = value
def a__( self : int )-> float:
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
UpperCAmelCase = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase ) )
def a__( self : Optional[int] , lowerCAmelCase : Vector , lowerCAmelCase : bool = False )-> float:
"""simple docstring"""
UpperCAmelCase = self * other
UpperCAmelCase = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
assert isinstance(A , A )
return Vector([0] * dimension )
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
assert isinstance(A , A ) and (isinstance(A , A ))
UpperCAmelCase = [0] * dimension
UpperCAmelCase = 1
return Vector(A )
def lowerCamelCase__ ( A : float , A : Vector , A : Vector ):
'''simple docstring'''
assert (
isinstance(A , A )
and isinstance(A , A )
and (isinstance(A , (int, float) ))
)
return x * scalar + y
def lowerCamelCase__ ( A : int , A : int , A : int ):
'''simple docstring'''
random.seed(A )
UpperCAmelCase = [random.randint(A , A ) for _ in range(A )]
return Vector(A )
class UpperCamelCase__:
def __init__( self : Tuple , lowerCAmelCase : list[list[float]] , lowerCAmelCase : int , lowerCAmelCase : int )-> None:
"""simple docstring"""
UpperCAmelCase = matrix
UpperCAmelCase = w
UpperCAmelCase = h
def __str__( self : int )-> str:
"""simple docstring"""
UpperCAmelCase = ''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Any , lowerCAmelCase : Matrix )-> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase = []
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] + other.component(lowerCAmelCase , lowerCAmelCase )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase )
return Matrix(lowerCAmelCase , self.__width , self.__height )
else:
raise Exception('''matrix must have the same dimension!''' )
def __sub__( self : int , lowerCAmelCase : Matrix )-> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase = []
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] - other.component(lowerCAmelCase , lowerCAmelCase )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase )
return Matrix(lowerCAmelCase , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self : Tuple , lowerCAmelCase : float )-> Matrix:
"""simple docstring"""
...
@overload
def __mul__( self : List[str] , lowerCAmelCase : Vector )-> Vector:
"""simple docstring"""
...
def __mul__( self : List[str] , lowerCAmelCase : float | Vector )-> Vector | Matrix:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase ): # matrix-vector
if len(lowerCAmelCase ) == self.__width:
UpperCAmelCase = zero_vector(self.__height )
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] * other.component(lowerCAmelCase )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase , sum(lowerCAmelCase ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(lowerCAmelCase , (int, float) ): # matrix-scalar
UpperCAmelCase = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase , self.__width , self.__height )
return None
    def height(self) -> int:
        """getter for the height"""
        return self.__height

    def width(self) -> int:
        """getter for the width"""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """returns the specified (x, y) component"""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """changes the specified (x, y) component"""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """returns the minor along (x, y): the determinant with row x and column y removed"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """returns the cofactor (signed minor) along (x, y)"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """computes the determinant via Laplace expansion along the first row"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """returns a square zero-matrix of dimension n x n"""
    matrix = [[0] * n for _ in range(n)]
    return Matrix(matrix, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """returns a random matrix of size width x height with integer entries in [a, b]"""
    random.seed(None)  # reseed from system entropy
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
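# --- hedged usage sketch (editor addition, not part of the original module) ---
# Assuming the Matrix constructor above this excerpt stores its arguments as
# Matrix(rows, width, height), the determinant of a 2x2 matrix reduces to the
# direct formula a*d - b*c:
#
#     demo = Matrix([[1, 2], [3, 4]], 2, 2)
#     assert demo.determinant() == 1 * 4 - 2 * 3 == -2
#     assert demo.component(0, 1) == 2
#     assert square_zero_matrix(3).component(2, 2) == 0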
| 91 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """builds a ksize x ksize Gabor kernel (the size is forced to be odd)"""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # compute each kernel value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
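# --- hedged sanity check (editor addition, not part of the original module) ---
# With theta=0 and gamma=0 the y-term vanishes, so every row of the kernel is
# identical and the kernel is unchanged by a vertical flip:
#
#     k = gabor_filter_kernel(9, 8, 0, 10, 0, 0)
#     assert k.shape == (9, 9)            # 9 is already odd, so no resize
#     assert np.allclose(k, np.flipud(k))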
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 11x11 mask and 6 directions", out)
    waitKey(0)
| 91 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
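# Hedged example (editor addition): ord("中") == 0x4E2D falls in the first
# block above, so _is_chinese_char(ord("中")) is True, while
# _is_chinese_char(ord("a")) is False.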
def is_chinese(word: str) -> int:
    """Returns 1 if every character in word is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collects the multi-character Chinese words appearing in tokens."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Marks the non-leading characters of known Chinese words with '##'."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
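# Hedged worked example (editor addition): with bert_tokens ["中", "国", "人"]
# and chinese_word_set {"中国"}, the loop joins "中" + "国" into the known
# word and returns ["中", "##国", "人"]; "人" is left alone because no longer
# match starts there.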
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    """Builds, for every line, the positions of Chinese sub-word tokens."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Reads lines, computes Chinese whole-word-masking refs, and writes them as JSON."""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 116 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
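# --- hedged usage sketch (editor addition, not part of the original module) ---
if __name__ == "__main__":
    hm = HashMap(initial_block_size=8)
    hm["alpha"] = 1
    hm["beta"] = 2
    hm["alpha"] = 3  # overwriting reuses the slot, so the length stays at 2
    assert len(hm) == 2 and hm["alpha"] == 3
    del hm["beta"]  # the bucket is tombstoned with _deleted, not reset to None
    assert "beta" not in hm and len(hm) == 1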
| 116 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""ConvNextFeatureExtractor"""]
UpperCAmelCase = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
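# Hedged usage note (editor addition): with the lazy-module pattern above,
# `from transformers.models.convnext import ConvNextModel` only triggers the
# heavy torch import the first time the attribute is actually resolved;
# `_import_structure` maps each submodule to the names it exports.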
| 267 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 267 | 1 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : Dict , *a_ : Optional[int] , **a_ : Dict ):
super().__init__(*a_ , **a_ )
def lowerCamelCase ( self : List[str] , a_ : Dict , a_ : Tuple ):
lowerCAmelCase_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(a_ )
lowerCAmelCase_ : Tuple = self.values[key]
def lowerCamelCase ( self : str ):
return (
sum(self.charge_factor - len(a_ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def lowerCamelCase ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int]=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(a_ ) == 0
):
return key
return super()._collision_resolution(a_ , a_ )
| 241 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A : Tuple = "src/transformers"
A : Optional[Any] = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Finds the text in filename between start_prompt and end_prompt, plus its line span."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim empty lines at both ends of the block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Returns the markdown list of models supporting task_guide."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Checks the model list in a task guide against the auto-discovered one, optionally rewriting it."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
else:
raise ValueError(
F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
" to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 118 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 61 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self : List[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Union[str, Any]=13 ,lowerCamelCase__ : Dict=32 ,lowerCamelCase__ : str=3 ,lowerCamelCase__ : Union[str, Any]=4 ,lowerCamelCase__ : Optional[int]=[10, 20, 30, 40] ,lowerCamelCase__ : Dict=[2, 2, 3, 2] ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : List[Any]=37 ,lowerCamelCase__ : Optional[Any]="gelu" ,lowerCamelCase__ : List[str]=10 ,lowerCamelCase__ : Union[str, Any]=0.0_2 ,lowerCamelCase__ : int=["stage2", "stage3", "stage4"] ,lowerCamelCase__ : Optional[int]=[2, 3, 4] ,lowerCamelCase__ : List[Any]=None ,):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = num_stages
UpperCAmelCase__ = hidden_sizes
UpperCAmelCase__ = depths
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = out_features
UpperCAmelCase__ = out_indices
UpperCAmelCase__ = scope
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.num_labels )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : str ):
return ConvNextConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int ):
UpperCAmelCase__ = ConvNextModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = ConvNextForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int ):
UpperCAmelCase__ = ConvNextBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase__ = None
UpperCAmelCase__ = ConvNextBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = ConvNextModelTester(self )
UpperCAmelCase__ = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : Any ):
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def __lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase__ )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
def check_hidden_states_output(lowerCamelCase__ : Tuple ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ):
UpperCAmelCase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
UpperCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) ,expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : Optional[int] ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = ConvNextModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def a_ ( ):
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : Tuple ):
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(lowerCamelCase__ )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**lowerCamelCase__ )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
UpperCAmelCase__ = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
@require_torch
class snake_case ( unittest.TestCase , __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (ConvNextBackbone,) if is_torch_available() else ()
snake_case__ = ConvNextConfig
snake_case__ = False
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = ConvNextModelTester(self )
| 98 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two parallel Transformer2DModels whose outputs are mixed for dual-conditioned generation."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
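# Hedged note (editor addition): with r = mix_ratio, the forward pass returns
#     x + r * (T1(x) - x) + (1 - r) * (T2(x) - x)
# i.e. a residual update that linearly interpolates the two transformers'
# contributions; the default r = 0.5 weighs both conditions equally.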
| 282 | 0 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
if attention_mask is None:
lowerCAmelCase : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase : Optional[int] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase : List[str] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_snake_case )
if decoder_head_mask is None:
lowerCAmelCase : Tuple = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_snake_case )
if cross_attn_head_mask is None:
lowerCAmelCase : List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
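# Hedged note (editor addition): when no masks are passed in, the helper above
# falls back to padding-based attention masks (`input_ids.ne(pad_token_id)`)
# and all-ones head masks, so tests only need to supply input_ids and
# decoder_input_ids.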
class snake_case_:
def __init__( self : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int]=1_3 , UpperCamelCase_ : Optional[int]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : int=9_9 , UpperCamelCase_ : Optional[Any]=1_6 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : List[str]="relu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : int=2_0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Any=1 , UpperCamelCase_ : List[Any]=0 , ):
lowerCAmelCase : Dict = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : Optional[int] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : List[str] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : Dict = decoder_layerdrop
lowerCAmelCase : Tuple = max_position_embeddings
lowerCAmelCase : List[Any] = eos_token_id
lowerCAmelCase : List[str] = pad_token_id
lowerCAmelCase : str = bos_token_id
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[Any] = self.eos_token_id # Eos Token
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase : int = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase : Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase : Tuple = self.get_config()
lowerCAmelCase : Union[str, Any] = prepare_mam_aaa_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : str ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : str = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict ):
lowerCAmelCase : int = MaMaaaModel(config=UpperCamelCase_ ).get_decoder().to(UpperCamelCase_ ).eval()
lowerCAmelCase : str = inputs_dict['''input_ids''']
lowerCAmelCase : Tuple = inputs_dict['''attention_mask''']
lowerCAmelCase : List[str] = inputs_dict['''head_mask''']
# first forward pass
lowerCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : str = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )['''last_hidden_state''']
lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[
'''last_hidden_state'''
]
# select random slice
lowerCAmelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-2 ) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : Union[str, Any] = MaMaaaModel(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval()
lowerCAmelCase : Any = model(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = outputs.encoder_last_hidden_state
lowerCAmelCase : Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Union[str, Any] = model.get_encoder()
encoder.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = MaMaaaEncoder.from_pretrained(UpperCamelCase_ ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Union[str, Any] = model.get_decoder()
decoder.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : str = MaMaaaDecoder.from_pretrained(UpperCamelCase_ ).to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = decoder(
input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class snake_case_( a__ , a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__UpperCamelCase = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Any ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Optional[int] = MaMaaaModelTester(self )
lowerCAmelCase : Any = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = model_class(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ )
lowerCAmelCase, lowerCAmelCase : Any = model_class.from_pretrained(UpperCamelCase_ , output_loading_info=UpperCamelCase_ )
self.assertEqual(info['''missing_keys'''] , [] )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : int = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
if not self.is_encoder_decoder:
lowerCAmelCase : Union[str, Any] = inputs['''input_ids''']
del inputs["input_ids"]
else:
lowerCAmelCase : List[str] = inputs['''input_ids''']
lowerCAmelCase : int = inputs.get('''decoder_input_ids''' , UpperCamelCase_ )
del inputs["input_ids"]
inputs.pop('''decoder_input_ids''' , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCAmelCase : Optional[Any] = wte(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = wte(UpperCamelCase_ )
lowerCAmelCase : List[str] = wte(UpperCamelCase_ )
with torch.no_grad():
model(**UpperCamelCase_ )[0]
def lowerCamelCase__ ( self : str ):
lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : int = input_dict['''input_ids''']
lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ )
lowerCAmelCase : Any = MaMaaaForConditionalGeneration(UpperCamelCase_ ).eval().to(UpperCamelCase_ )
if torch_device == "cuda":
model.half()
model.generate(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
model.generate(num_beams=4 , do_sample=UpperCamelCase_ , early_stopping=UpperCamelCase_ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class snake_case_( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Optional[int] ):
return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(UpperCamelCase_ )
lowerCAmelCase : int = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
lowerCAmelCase : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
lowerCAmelCase : str = prepare_mam_aaa_inputs_dict(model.config , UpperCamelCase_ , UpperCamelCase_ )
with torch.no_grad():
lowerCAmelCase : str = model(**UpperCamelCase_ )[0]
lowerCAmelCase : Optional[int] = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , UpperCamelCase_ )
# change to expected output here
lowerCAmelCase : List[Any] = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=UpperCamelCase_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(UpperCamelCase_ )
# change to intended input
lowerCAmelCase : Tuple = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
lowerCAmelCase : Optional[Any] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
lowerCAmelCase : str = prepare_mam_aaa_inputs_dict(model.config , UpperCamelCase_ , UpperCamelCase_ )
with torch.no_grad():
lowerCAmelCase : List[Any] = model(**UpperCamelCase_ )[0]
lowerCAmelCase : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCamelCase_ )
# change to expected output here
lowerCAmelCase : List[str] = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=UpperCamelCase_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
lowerCAmelCase : Any = [
'''L\'affaire NSA souligne l\'absence totale de dรฉbat sur le renseignement''',
'''Selon moi, il y a deux niveaux de rรฉponse de la part du gouvernement franรงais.''',
'''Lorsque Franรงois Hollande tรฉlรฉphone ร Barack Obama ou quand le ministre des affaires รฉtrangรจres Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils rรฉagissent ร une vraie dรฉcouverte, qui est celle de'''
''' l\'ampleur de la surveillance amรฉricaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''pt''' )
lowerCAmelCase : Optional[int] = model.generate(
input_ids=dct['''input_ids'''].to(UpperCamelCase_ ) , attention_mask=dct['''attention_mask'''].to(UpperCamelCase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
lowerCAmelCase : Tuple = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When Franรงois Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
lowerCAmelCase : List[Any] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
assert generated == expected_en
| 314 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 314 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 52 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 nested list with the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
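# Hedged example (editor addition): floats_list((2, 3)) returns a 2-element
# list of 3 floats each, drawn uniformly from [0, scale); passing an explicit
# rng=random.Random(0) makes the draw reproducible across test runs.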
class A__ ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ):
'''simple docstring'''
UpperCamelCase : Tuple = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : List[Any] = min_seq_length
UpperCamelCase : List[str] = max_seq_length
UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Union[str, Any] = feature_size
UpperCamelCase : List[str] = padding_value
UpperCamelCase : Optional[Any] = sampling_rate
UpperCamelCase : List[str] = return_attention_mask
UpperCamelCase : List[Any] = do_normalize
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase( self , A_=False , A_=False ):
'''simple docstring'''
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Dict = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :Optional[Any] = ASTFeatureExtractor
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = ASTFeatureExtractionTester(self )
    def test_call(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        '''simple docstring'''
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 52 | 1 |
import math


def res(x: int, y: int) -> float:
    """simple docstring"""
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 359 |
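# --- Added usage sketch (not from the original file): the log proxy above
# decides which power is larger without materializing the ~300-digit
# integers. Assumes `res` from the snippet above is in scope; the assertion
# cross-checks the proxy against exact integer arithmetic.
assert (res(2, 1000) > res(3, 600)) == (2**1000 > 3**600)
print("2^1000 is larger" if res(2, 1000) > res(3, 600) else "3^600 is larger")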
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """simple docstring"""
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 0 |
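# --- Added sketch (illustrative only; the endpoint and parameters mirror the
# URL above but are not a verified live API): the same fetch with basic error
# handling instead of assuming a well-formed JSON payload.
import requests


def fetch_top_articles(api_key: str) -> list:
    response = requests.get(
        "https://newsapi.org/v1/articles",
        params={"source": "bbc-news", "sortBy": "top", "apiKey": api_key},
        timeout=10,
    )
    response.raise_for_status()  # surface HTTP errors before JSON parsing
    return [article["title"] for article in response.json().get("articles", [])]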
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    """simple docstring"""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """simple docstring"""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(*, aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING, default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs) -> dataclasses.Field:
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """simple docstring"""
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
@staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases', [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, '__origin__', field.type)
        if origin_type is Union or (hasattr(types, 'UnionType') and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    f" Problem encountered in field '{field.name}'." )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, '__origin__', field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, '__origin__', field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'])
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(f"--no_{field.name}", action='store_false', dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, '_argument_group_name'):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = '.'.join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix('.args'))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action='append')
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip('-'), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding='utf-8') as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 224 |
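# --- Added usage sketch for HfArgumentParser above (the dataclass fields and
# flag values are illustrative, not taken from the original file).
from dataclasses import dataclass, field


@dataclass
class TrainingConfig:
    learning_rate: float = field(default=5e-5, metadata={"help": "Optimizer LR."})
    fp16: bool = field(default=False, metadata={"help": "Use mixed precision."})


parser = HfArgumentParser(TrainingConfig)
(config,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--fp16"])
assert config.learning_rate == 3e-5 and config.fp16 is True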
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    """simple docstring"""
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_gradient_accumulator(self):
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
    def test_gradient_accumulator_distribution_strategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU')
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
        devices = tf.config.list_logical_devices(device_type='CPU')
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)
        def accumulate_on_replica(gradient):
            accumulator([gradient])
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))
        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)
        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 224 | 1 |
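# --- Added conceptual sketch (plain NumPy, not the real TF class): what a
# gradient accumulator does across micro-batches before an optimizer step.
# The class and values below are illustrative only.
import numpy as np


class ToyAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        if self.gradients is None:
            self.gradients = [np.zeros_like(g, dtype=np.float64) for g in grads]
        for buf, g in zip(self.gradients, grads):
            buf += g  # accumulate in place; the caller applies/resets later
        self.step += 1

    def reset(self):
        self.step = 0
        self.gradients = [np.zeros_like(buf) for buf in self.gradients]


acc = ToyAccumulator()
for g in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
    acc([np.array(g)])
assert acc.step == 3 and np.allclose(acc.gradients[0], [-2.0, 5.0])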
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "albert"
    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 356 |
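# --- Added usage sketch (hyperparameter values illustrative): ALBERT shares
# weights across layer groups, so `num_hidden_layers` can exceed the number
# of distinct parameter groups.
config = AlbertConfig(hidden_size=768, num_hidden_layers=12, num_hidden_groups=1)
assert config.model_type == "albert"
# With one group, all 12 layers reuse the same parameters.
print(config.num_hidden_layers // config.num_hidden_groups, "layers per parameter group")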
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 306 | 0 |
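# --- Added usage sketch: running the preprocessing chain above on a dummy
# PIL image. With the defaults, the shortest edge is resized to 224 and a
# 224x224 center crop is taken.
import numpy as np
from PIL import Image

processor = CLIPImageProcessor()
dummy = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
batch = processor(images=dummy, return_tensors="np")
assert batch["pixel_values"].shape == (1, 3, 224, 224)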
def dodecahedron_surface_area(edge: float) -> float:
    '''simple docstring'''
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('Length must be a positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    '''simple docstring'''
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('Length must be a positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 129 |
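# --- Added worked example: cross-checking the closed-form expressions above
# for edge = 2 against the textbook regular-dodecahedron formulas.
import math

assert math.isclose(dodecahedron_surface_area(2), 3 * math.sqrt(25 + 10 * math.sqrt(5)) * 4)
assert math.isclose(dodecahedron_volume(2), (15 + 7 * math.sqrt(5)) / 4 * 8)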
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
    '''simple docstring'''
    def setUp(self) -> None:
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """simple docstring"""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        """simple docstring"""
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 129 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the great-circle distance between two points on Earth, in meters."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 117 |
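# --- Added usage sketch: distance from San Francisco to Yosemite. The
# coordinates are approximate and purely illustrative.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
meters = haversine_distance(*SAN_FRANCISCO, *YOSEMITE)
print(f"{meters / 1000:.1f} km")  # roughly 254 km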
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 117 | 1 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent

    def prepare_feat_extract_dict(self):
        """simple docstring"""
        return {}
def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
    return [html_string_1, html_string_2]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None

    def setUp(self):
        """simple docstring"""
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        """simple docstring"""
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        """simple docstring"""
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 61 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 130 | 0 |
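# --- Added sketch of the special-token layout the two helpers above produce
# (token ids 10/11/20 are placeholders; loading the vocab needs network access).
tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20])
# Layout: [CLS] 10 11 [SEP] 20 [SEP]
assert ids == [tokenizer.cls_token_id, 10, 11, tokenizer.sep_token_id, 20, tokenizer.sep_token_id]
assert tokenizer.create_token_type_ids_from_sequences([10, 11], [20]) == [0, 0, 0, 0, 1, 1]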
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """A heap on key(item_value) that supports updates and deletion of items."""

    def __init__(self, key: Callable = None):
        # Stores the heap as [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        # Returns the index of the child that should be above `i`, or `i` itself.
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index):
        # Fixes the heap in the upward direction from the given index.
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        # Fixes the heap in the downward direction from the given index.
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        # Updates the value of the given item in the heap, if present.
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        # Deletes the given item from the heap, if present.
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        # Inserts the given item with the given value into the heap.
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        # Returns the [item, score] pair at the top of the heap, if any.
        return self.arr[0] if self.size else None

    def extract_top(self):
        # Returns and removes the [item, score] pair at the top of the heap, if any.
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 367 |
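# --- Added usage sketch for the heap above (the original doctests were
# elided): with the default identity key, the largest score sits on top.
h = Heap()
h.insert_item(5, 34)
h.insert_item(6, 31)
h.insert_item(7, 37)
assert h.get_top() == [7, 37]   # stored as [item, score] pairs
h.update_item(6, 40)            # re-score an existing item
assert h.extract_top() == [6, 40]
h.delete_item(7)
assert h.get_top() == [5, 34]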
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 254 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    """simple docstring"""
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 194 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 263 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            # Backward compatibility: a single image paired with candidate labels
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
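# A minimal usage sketch (editor addition, not part of the original module).
# The OWL-ViT checkpoint below is one example of a model registered for this
# pipeline; any zero-shot object detection checkpoint would work the same way:
#
#   from transformers import pipeline
#
#   detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]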
| 360 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(
        self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu",
        untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512,
        reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False,
        summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1,
        start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
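# Editor sketch (not part of the original module): instantiating the config and
# exercising the `attribute_map` aliases defined above.
#
#   config = XLNetConfig(vocab_size=1000, d_model=64, n_layer=2, n_head=4)
#   config.hidden_size          # -> 64 (alias of d_model)
#   config.num_attention_heads  # -> 4 (alias of n_head)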
| 245 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
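        # Editor note, spelling out the algebra behind the next line:
        # from a + b + c = n we get c = n - a - b; substituting into
        # a**2 + b**2 = c**2 and expanding gives
        #   0 = n**2 - 2*a*n - 2*b*n + 2*a*b,
        # which solves to b = (n**2 - 2*a*n) / (2*n - 2*a).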
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
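def solution_brute_force(n: int = 1000) -> int:
    # Editor sketch (not in the original file): an O(n^2) cross-check of the
    # algebraic shortcut above, useful for validating small inputs, e.g.
    # solution(12) == solution_brute_force(12) == 60 (the 3-4-5 triple).
    product = -1
    for a in range(1, n // 3):
        for b in range(a + 1, n):
            c = n - a - b
            if c > b and a * a + b * b == c * c:
                product = max(product, a * b * c)
    return product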
if __name__ == "__main__":
print(F'{solution() = }')
| 41 | 1 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
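# Editor sketch (not in the original script): how an expert-layer key flows
# through rename_base_flax_keys. The key and shape below are made-up
# illustration values; (experts, in_features, out_features) becomes
# (experts, out_features, in_features).
#
#   key, tensor = rename_base_flax_keys(("mlp", "wi", "kernel"), torch.zeros(8, 4, 2))
#   key == ("mlp", "wi", "weight") and tensor.shape == torch.Size([8, 2, 4])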
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("layer", "layers")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
lowerCAmelCase__ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 52 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
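# Editor check: the first Fibonacci number with three digits is F(12) = 144,
# so solution(3) == 12; solution(1000) reproduces the Project Euler 25 answer.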
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 52 | 1 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues = [
[],
[],
[],
]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")
def __str__( self ) -> str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue = []
    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
def __str__( self ) -> str:
'''simple docstring'''
return str(self.queue )
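# Editor note: ElementPriorityQueue.dequeue costs O(n) per call because it
# scans with min() and then remove(); a heapq-based implementation would make
# both enqueue and dequeue O(log n).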
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 90 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
a_ = '\nHugging Face was founded in 2016 by French entrepreneurs Clรฉment Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class UpperCAmelCase_ ( unittest.TestCase , snake_case ):
def _lowerCamelCase ( self ) -> int:
__lowercase : List[Any] = load_tool('''text-question-answering''' )
self.tool.setup()
__lowercase : Any = load_tool('''text-question-answering''' , remote=UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Dict = self.tool(UpperCamelCase_ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCamelCase_ , '''launched the BigScience Research Workshop''' )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Optional[int] = self.remote_tool(UpperCamelCase_ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCamelCase_ , '''launched the BigScience Research Workshop''' )
def _lowerCamelCase ( self ) -> Any:
__lowercase : Union[str, Any] = self.tool(text=UpperCamelCase_ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCamelCase_ , '''launched the BigScience Research Workshop''' )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = self.remote_tool(text=UpperCamelCase_ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCamelCase_ , '''launched the BigScience Research Workshop''' )
| 249 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
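    # Editor note: `_decode` reverses the `str.maketrans(" \n", "\u2582\u2583")`
    # translator installed in `__init__`, mapping "\u2582" back to a space and
    # "\u2583" back to a newline after SentencePiece decoding.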
| 361 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"รฉ",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 346 | 0 |
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 221 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
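    # Editor note, a worked example of the flax `shift_tokens_right` semantics
    # (prepend decoder_start_token_id=2 and drop the last token):
    #   [[71, 82, 18, 33,  2,  1, 1],     ->  [[2, 71, 82, 18, 33,  2,  1],
    #    [68, 34, 26, 58, 30, 82, 2]]          [2, 68, 34, 26, 58, 30, 82]]
    # which is why the assertion above expects exactly one fewer pad token (id 1).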
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 61 | 0 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths
    from HTML strings.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
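# A minimal usage sketch (editor addition, not part of the original module);
# in transformers this extractor is exposed as MarkupLMFeatureExtractor:
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding["nodes"]   # -> [["Hello world"]]
#   encoding["xpaths"]  # -> [["/html/body/p"]]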
| 291 |
"""simple docstring"""
a_ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
a_ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
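# Editor examples (consistent with the tables above):
#   length_conversion(4, "kilometer", "meter")  -> 4000
#   length_conversion(1, "meter", "kilometer")  -> 0.001
#   length_conversion(4, "km", "m")             -> 4000  (symbols work too)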
if __name__ == "__main__":
from doctest import testmod
testmod()
| 291 | 1 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
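# Editor check: for n = 5 the distinct values of a**b with 2 <= a, b <= 5
# number 15 (e.g. 2**4 == 4**2 == 16 collapses in the set), so solution(5) == 15.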
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 144 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"
    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : List[str] ):
cls._set_token_in_kwargs(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : List[Any] =cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
lowerCamelCase_ : Union[str, Any] =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class InstructBlipQFormerConfig(PretrainedConfig):

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
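
# A minimal usage sketch (hedged): composing a config from default sub-configs.
# The text model falls back to OPT when `text_config` carries no `model_type`.
if __name__ == "__main__":
    config = InstructBlipConfig()
    print(config.vision_config.hidden_size)  # expected: 1408
    print(config.text_config.model_type)  # expected: "opt"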
| 144 | 1 |
from __future__ import annotations
from typing import Any
class __snake_case :
    def __init__(self, row: int, column: int, default_value: float = 0):
        """simple docstring"""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        """simple docstring"""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self : int ):
"""simple docstring"""
return str(self )
    def validate_indicies(self, loc: tuple[int, int]):
        """simple docstring"""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        """simple docstring"""
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        """simple docstring"""
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix):
        """simple docstring"""
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        """simple docstring"""
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix):
        """simple docstring"""
        return self + (-another)
    def __mul__(self, another: int | float | Matrix):
        """simple docstring"""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        """simple docstring"""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        """simple docstring"""
        # Sanity check
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
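
    # For reference (standard identity, hedged): when `self` holds A^(-1),
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # which is exactly what `sherman_morrison` computes above.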
# Testing
if __name__ == "__main__":
    def test1() -> None:
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """simple docstring"""
        import doctest

        doctest.testmod()

    test1()
    test2()
| 204 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
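
# Hedged usage note: with the lazy structure above, importing the package is cheap;
# e.g. `from transformers.models.whisper import WhisperConfig` only triggers the
# `configuration_whisper` import on first attribute access.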
| 204 | 1 |
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
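    # Hedged expected output for the driver above: "G->C->A->B->D", then "G", then a
    # ValueError for the unreachable vertex "Foo".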
| 103 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "๐ค Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
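
# Hedged usage sketch (path assumed; run from the repository root):
#     python utils/release.py            # prepare a minor release
#     python utils/release.py --patch    # prepare a patch release
#     python utils/release.py --post_release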
| 103 | 1 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """simple docstring"""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F'''{target} was {not_str}found in {sequence}''')
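
    # Hedged example: on the sorted input [1, 3, 5, 7], binary_search([1, 3, 5, 7], 5)
    # returns True and binary_search([1, 3, 5, 7], 4) returns False; the list must
    # already be sorted for the midpoint recursion to be valid.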
| 352 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
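

# A minimal usage sketch (hedged; assumes Pillow is installed): with the default
# size/crop settings above, one image comes back as a (1, 3, 224, 224) array.
if __name__ == "__main__":
    image_processor = CLIPImageProcessor()
    dummy = PIL.Image.fromarray((np.random.rand(256, 320, 3) * 255).astype("uint8"))
    outputs = image_processor.preprocess(dummy, return_tensors="np")
    print(outputs["pixel_values"].shape)  # expected: (1, 3, 224, 224)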
| 216 | 0 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow logging (the only use of `os` above)
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 27 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction(enum.Enum):
    """simple docstring"""

    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """simple docstring"""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """simple docstring"""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """simple docstring"""
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    """simple docstring"""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """simple docstring"""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """simple docstring"""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 252 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
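

# Hedged usage note: the fast checks above can be run on their own with pytest, e.g.
#     python -m pytest -k "test_model or test_config" tests/models/esm/test_modeling_esmfold.py
# (path assumed from the transformers repository layout).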
| 279 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 279 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in a slice of the list; returns -1 if not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
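
    # Hedged example: searching for 5 in the sorted list 1..20 returns index 4 from
    # both ite_ternary_search and rec_ternary_search (lin_search takes over once the
    # interval is narrower than `precision`).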
| 326 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
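

# A minimal usage sketch (hedged): a single-process run might configure things as
#     from types import SimpleNamespace
#     args = SimpleNamespace(seed=42, n_gpu=1, local_rank=-1)
#     init_gpu_params(args)
#     set_seed(args)
# (attribute names taken from the asserts above).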
| 326 | 1 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
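
    # Hedged spot checks: decimal_to_any(255, 16) == "FF" and
    # decimal_to_any(5, 2) == "101", consistent with the exhaustive loop above.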
| 180 |
lowercase__ : Optional[int] = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 180 | 1 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "๐ค Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Dict = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
| 331 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
def lowerCamelCase_ ( self : Tuple ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
        UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(__snake_case )
UpperCAmelCase_ = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
UpperCAmelCase_ = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
UpperCAmelCase_ = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__snake_case )
UpperCAmelCase_ = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCamelCase_ ( self : Any ):
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
UpperCAmelCase_ = [
            '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
            '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
            '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
            '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
            '''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__snake_case , )
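# A minimal standalone sketch of the slow/fast consistency check exercised above,
# assuming network access to the "moussaKam/mbarthez" checkpoint; the sentence is
# illustrative data, not part of the original suite.
if __name__ == "__main__":
    slow_tok = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
    fast_tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
    sentence = "I was born in 92000, and this is falsé."
    assert slow_tok.tokenize(sentence) == fast_tok.tokenize(sentence)
    assert slow_tok.encode(sentence) == fast_tok.encode(sentence)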
| 177 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
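# Worked example for _flat_idx_to_idx above: with dims (2, 3), flat index 5 peels the
# last dimension first (5 % 3 = 2, then 5 // 3 = 1), giving the multi-index (1, 2):
# _flat_idx_to_idx(5, (2, 3)) == (1, 2)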
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
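# Worked example for _get_minimal_slice_set above: with batch dims (2, 3), selecting
# flat positions 1..4 (inclusive) spans the ragged tail of row 0 and the ragged head
# of row 1, so it decomposes into two slice tuples,
# [(slice(0, 1), slice(1, 3)), (slice(1, 2), slice(0, 2))], rather than one rectangle.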
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
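# A minimal usage sketch for chunk_layer above (illustrative helper, not part of the
# original module): both batch dims (4, 8) are flattened, processed in slices of
# `chunk_size` elements, and the result is reshaped back to the full batch shape.
def _example_chunk_layer_usage() -> None:
    def toy_layer(x: torch.Tensor) -> torch.Tensor:
        return x * 2.0

    inputs = {"x": torch.randn(4, 8, 16)}
    out = chunk_layer(toy_layer, inputs, chunk_size=8, no_batch_dims=2)
    assert out.shape == (4, 8, 16)
    assert torch.allclose(out, inputs["x"] * 2.0)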
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent
    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
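# The tuner above effectively searches the power-of-two candidates for the largest
# chunk size whose trial forward pass does not raise (typically a CUDA OOM
# RuntimeError). A compact linear-scan sketch of the same idea; `fits` is a
# hypothetical probe callable, not part of the original module.
def _largest_viable_chunk(fits: Callable, max_chunk_size: int = 512, min_chunk_size: int = 1) -> int:
    candidates = [min_chunk_size] + [
        2**l for l in range(int(math.log(max_chunk_size, 2)) + 1) if 2**l > min_chunk_size
    ]
    best = candidates[0]
    for c in candidates:  # candidates are sorted ascending
        if fits(c):
            best = c
        else:
            break
    return best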
| 177 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp(self) -> None:
        self.block_size = 10
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = [1, 2, 3, 4]
lowerCamelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
self.assertEqual(lowercase , [] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = ""
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
self.assertEqual(lowercase , [] )
self.assertEqual(lowercase , [] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
lowerCamelCase_ = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(lowercase , lowercase )
lowerCamelCase_ = ["It was the best of times."]
self.assertEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = torch.tensor([1, 2, 3, 4] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = 101
lowerCamelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
lowerCamelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowerCamelCase_ = compute_token_type_ids(lowercase , lowercase )
np.testing.assert_array_equal(lowercase , lowercase )
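# The assertions above pin down the contract of truncate_or_pad: shorter sequences are
# right-padded with the pad token up to block_size, longer ones are truncated. A minimal
# implementation sketch consistent with those assertions (the real helper lives in
# utils_summarization; this name is illustrative):
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert _truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]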
| 19 |
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct
class SHA1Hash:
    """Computes the SHA-1 digest of a bytestring, mirroring hashlib.sha1."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash state from the SHA-1 specification
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        # Left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        # Pad the message to a multiple of 64 bytes, ending with the 64-bit bit length
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        # Split the padded message into 64-byte blocks
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block: bytes) -> list:
        # Expand a 64-byte block into eighty 32-bit words
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    data = b"Test String"
    assert SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
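# Quick sanity example for the implementation above; the digest of b"abc" is the
# well-known SHA-1 test vector:
# >>> SHA1Hash(b"abc").final_hash()
# 'a9993e364706816aba3e25717850c26c9cd0d89d'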
| 245 | 0 |
"""simple docstring"""
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCamelCase = logging.get_logger(__name__)
@dataclass
class lowercase__ :
def __init__( self : Dict , _UpperCAmelCase : Any=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=6.0 , _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Dict="fp4" , _UpperCAmelCase : Dict=False , **_UpperCAmelCase : int , ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = load_in_abit
UpperCAmelCase_ = load_in_abit
UpperCAmelCase_ = llm_inta_threshold
UpperCAmelCase_ = llm_inta_skip_modules
UpperCAmelCase_ = llm_inta_enable_fpaa_cpu_offload
UpperCAmelCase_ = llm_inta_has_fpaa_weight
UpperCAmelCase_ = bnb_abit_quant_type
UpperCAmelCase_ = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
UpperCAmelCase_ = torch.floataa
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = getattr(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , torch.dtype ):
UpperCAmelCase_ = bnb_abit_compute_dtype
else:
raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" )
self.post_init()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
if not isinstance(self.llm_inta_threshold , _UpperCAmelCase ):
raise ValueError("llm_int8_threshold must be a float" )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _UpperCAmelCase ):
raise ValueError("llm_int8_skip_modules must be a list of strings" )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _UpperCAmelCase ):
raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" )
if not isinstance(self.llm_inta_has_fpaa_weight , _UpperCAmelCase ):
raise ValueError("llm_int8_has_fp16_weight must be a boolean" )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" )
if not isinstance(self.bnb_abit_quant_type , _UpperCAmelCase ):
raise ValueError("bnb_4bit_quant_type must be a string" )
if not isinstance(self.bnb_abit_use_double_quant , _UpperCAmelCase ):
raise ValueError("bnb_4bit_use_double_quant must be a boolean" )
if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse(
"0.39.0" ):
raise ValueError(
"4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
return self.load_in_abit or self.load_in_abit
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def lowercase__ ( cls : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = cls(**_UpperCAmelCase )
UpperCAmelCase_ = []
for key, value in kwargs.items():
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
to_remove.append(_UpperCAmelCase )
for key in to_remove:
kwargs.pop(_UpperCAmelCase , _UpperCAmelCase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, os.PathLike] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
UpperCAmelCase_ = self.to_dict()
UpperCAmelCase_ = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + '\n'
writer.write(_UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Dict[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = str(output["bnb_4bit_compute_dtype"] ).split("." )[1]
return output
def __repr__( self : Any ) -> Tuple:
'''simple docstring'''
return F"""{self.__class__.__name__} {self.to_json_string()}"""
def lowercase__ ( self : Dict , _UpperCAmelCase : bool = True ) -> str:
'''simple docstring'''
if use_diff is True:
UpperCAmelCase_ = self.to_diff_dict()
else:
UpperCAmelCase_ = self.to_dict()
return json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + "\n"
def lowercase__ ( self : Optional[Any] ) -> Dict[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.to_dict()
# get the default config dict
UpperCAmelCase_ = BitsAndBytesConfig().to_dict()
UpperCAmelCase_ = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
UpperCAmelCase_ = value
return serializable_config_dict
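# A typical construction sketch for the config above, written with the public
# `transformers` argument names (load_in_4bit, bnb_4bit_*); the dump scrambles the
# internal attribute names, so treat this as illustrative usage rather than a test:
#
#     bnb_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_use_double_quant=True,
#         bnb_4bit_compute_dtype=torch.bfloat16,
#     )
#     model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)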
| 359 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 241 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 115 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 115 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _snake_case ( OnnxPipelineTesterMixin , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
lowerCAmelCase_ : Tuple = "ssube/stable-diffusion-x4-upscaler-onnx"
def lowerCAmelCase__ ( self , a__=0 ) -> Dict:
'''simple docstring'''
snake_case_ = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_ ) )
snake_case_ = torch.manual_seed(lowercase_ )
snake_case_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowercase_ ).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowercase_ ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowercase_ ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowercase_ ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowercase_ ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ = init_image.resize((128, 128) )
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = """A fantasy landscape, trending on artstation"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type="np" , )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ = init_image.resize((128, 128) )
snake_case_ = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
snake_case_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = """A fantasy landscape, trending on artstation"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type="np" , )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
snake_case_ = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 371 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_SCREAMING_SNAKE_CASE : Any = False
class _snake_case ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
image=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
snake_case_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 92 | 0 |
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of a PIL Image by `level`."""

    def brightness(c: int) -> float:
        # Applied to every channel value; PIL clamps the result to [0, 255]
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('''image_data/lena_brightness.png''', format='''png''')
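# Worked example: with level = 100, a pixel channel value c = 50 maps to
# 128 + 100 + (50 - 128) = 150, i.e. every channel shifts up by `level`;
# PIL's point() clamps results to the valid [0, 255] range.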
| 72 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72 | 1 |
'''simple docstring'''
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to res
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
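    # Expected output of the demo above: [1, 2, 3, 9, 10, 13, 14]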
| 354 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__lowerCAmelCase = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = {
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
_snake_case = int(re.match(R""".*layer_(\d*).*""" , _SCREAMING_SNAKE_CASE )[1] )
layer_number -= 3
return f"""h.{layer_number}.""" + key
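# Worked example for the key renaming above (illustrative values): a Megatron shard
# named along the lines of "layer_05-model_00-model_states.pt" yields layer index 5,
# which is shifted down by 3 because the first slots hold the embeddings, so a key
# such as "self_attention.dense.weight" becomes "h.2.self_attention.dense.weight".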
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
if dtype == torch.bool:
return 1 / 8
_snake_case = re.search(R"""[^\d](\d+)$""" , str(_SCREAMING_SNAKE_CASE ) )
if bit_search is None:
raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
_snake_case = int(bit_search.groups()[0] )
return bit_size // 8
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Construct model
if bloom_config_file == "":
_snake_case = BloomConfig()
else:
_snake_case = BloomConfig.from_json_file(_SCREAMING_SNAKE_CASE )
if shard_model:
_snake_case = os.listdir(_SCREAMING_SNAKE_CASE )
_snake_case = sorted(filter(lambda _SCREAMING_SNAKE_CASE : s.startswith("""layer""" ) and "model_00" in s , _SCREAMING_SNAKE_CASE ) )
_snake_case = {"""weight_map""": {}, """metadata""": {}}
_snake_case = 0
_snake_case = None
_snake_case = BloomConfig()
for j, file in enumerate(_SCREAMING_SNAKE_CASE ):
print("""Processing file: {}""".format(_SCREAMING_SNAKE_CASE ) )
_snake_case = None
for i in range(_SCREAMING_SNAKE_CASE ):
# load all TP files
_snake_case = file.replace("""model_00""" , f"""model_0{i}""" )
_snake_case = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , map_location="""cpu""" )
# Rename keys in the transformers names
_snake_case = list(temp.keys() )
for key in keys:
_snake_case = temp.pop(_SCREAMING_SNAKE_CASE )
if tensors is None:
_snake_case = temp
else:
for key in tensors.keys():
if any(key.endswith(_SCREAMING_SNAKE_CASE ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_snake_case = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
_snake_case = torch.cat([tensors[key], temp[key]] , dim=_SCREAMING_SNAKE_CASE )
            # Divide the weights we want to average by the pretraining TP degree
for key in tensors.keys():
if any(key.endswith(_SCREAMING_SNAKE_CASE ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_snake_case = tensors[key] / pretraining_tp
torch.save(
_SCREAMING_SNAKE_CASE , os.path.join(
_SCREAMING_SNAKE_CASE , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(_SCREAMING_SNAKE_CASE ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
_snake_case = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
_snake_case = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(_SCREAMING_SNAKE_CASE ) ).zfill(5 ) )
_snake_case = BloomConfig()
_snake_case = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_snake_case = total_size
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_SCREAMING_SNAKE_CASE , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
_snake_case = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + """\n"""
f.write(_SCREAMING_SNAKE_CASE )
else:
_snake_case = BloomModel(_SCREAMING_SNAKE_CASE )
_snake_case = os.listdir(_SCREAMING_SNAKE_CASE )
_snake_case = sorted(filter(lambda _SCREAMING_SNAKE_CASE : s.startswith("""layer""" ) and "model_00" in s , _SCREAMING_SNAKE_CASE ) )
_snake_case = None
for i, file in enumerate(_SCREAMING_SNAKE_CASE ):
_snake_case = None
for i in range(_SCREAMING_SNAKE_CASE ):
# load all TP files
_snake_case = file.replace("""model_00""" , f"""model_0{i}""" )
_snake_case = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , map_location="""cpu""" )
# Rename keys in the transformers names
_snake_case = list(temp.keys() )
for key in keys:
_snake_case = temp.pop(_SCREAMING_SNAKE_CASE )
if tensors is None:
_snake_case = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_SCREAMING_SNAKE_CASE ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_snake_case = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
_snake_case = torch.cat([tensors[key], temp[key]] , dim=_SCREAMING_SNAKE_CASE )
            # Divide the weights we want to average by the pretraining TP degree
for key in tensors.keys():
if any(key.endswith(_SCREAMING_SNAKE_CASE ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_snake_case = tensors[key] / pretraining_tp
_snake_case = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
_snake_case = set(other_keys.missing_keys )
else:
_snake_case = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_snake_case = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_snake_case = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
_snake_case = model.to(config.torch_dtype )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__lowerCAmelCase = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 270 | 0 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class lowerCAmelCase_ (nn.Module ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "layer_norm" , SCREAMING_SNAKE_CASE__ = False , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = only_cross_attention
SCREAMING_SNAKE_CASE__ : List[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
SCREAMING_SNAKE_CASE__ : int = AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE__ : Tuple = AdaLayerNormZero(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
SCREAMING_SNAKE_CASE__ : Any = (
AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm
else nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
)
SCREAMING_SNAKE_CASE__ : Tuple = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # is self-attn if encoder_hidden_states is none
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : str = None
# 3. Feed-forward
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = FeedForward(SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , final_dropout=SCREAMING_SNAKE_CASE__ )
# let chunk size default to None
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : Dict = 0
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = chunk_size
SCREAMING_SNAKE_CASE__ : str = dim
    def forward(self , hidden_states , attention_mask = None , encoder_hidden_states = None , encoder_attention_mask = None , timestep = None , cross_attention_kwargs = None , class_labels = None , ):
        """simple docstring"""
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states , timestep )
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
                hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states )
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1 ) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
            )
            attn_output = self.attn2(
                norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states )
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            ff_output = self.ff(norm_hidden_states )
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1 ) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
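# A minimal sketch (not part of the original module) illustrating why the
# feed-forward chunking above is safe: the FF block acts position-wise, so
# splitting the sequence dimension into chunks and concatenating the outputs
# reproduces the unchunked result. The shapes below are illustrative assumptions.
def _demo_feed_forward_chunking():
    import torch
    ff = FeedForward(dim=32 )
    hidden = torch.randn(2 , 8 , 32 )  # (batch, seq, dim); seq divisible by the chunk count
    chunked = torch.cat([ff(part ) for part in hidden.chunk(4 , dim=1 )] , dim=1 )
    assert torch.allclose(ff(hidden ) , chunked , atol=1E-6 )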
class FeedForward(nn.Module ):
    """simple docstring"""
    def __init__(self , dim , dim_out = None , mult = 4 , dropout = 0.0 , activation_fn = "geglu" , final_dropout = False , ):
        """simple docstring"""
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate="""tanh""" )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF layers as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def forward(self , hidden_states ):
        """simple docstring"""
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
class GELU(nn.Module ):
    """simple docstring"""
    def __init__(self , dim_in , dim_out , approximate = "none" ):
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        self.approximate = approximate
    def gelu(self , gate ):
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(gate , approximate=self.approximate )
        # mps: gelu is not implemented for float16, so cast up to float32 and back
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def forward(self , hidden_states ):
        """simple docstring"""
        hidden_states = self.proj(hidden_states )
        hidden_states = self.gelu(hidden_states )
        return hidden_states
class GEGLU(nn.Module ):
    """simple docstring"""
    def __init__(self , dim_in , dim_out ):
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def gelu(self , gate ):
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16, so cast up to float32 and back
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def forward(self , hidden_states ):
        """simple docstring"""
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
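# A hedged usage sketch (not from the original file): GEGLU projects to twice the
# output width, then splits the result into a value half and a GELU-gated half.
def _demo_geglu():
    import torch
    geglu = GEGLU(dim_in=16 , dim_out=32 )
    out = geglu(torch.randn(4 , 16 ) )
    assert out.shape == (4, 32)  # proj gives (4, 64); chunk(2, dim=-1) gates it down to (4, 32)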
class ApproximateGELU(nn.Module ):
    """simple docstring"""
    def __init__(self , dim_in , dim_out ):
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def forward(self , x ):
        """simple docstring"""
        x = self.proj(x )
        return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm(nn.Module ):
    """simple docstring"""
    def __init__(self , embedding_dim , num_embeddings ):
        """simple docstring"""
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def forward(self , x , timestep ):
        """simple docstring"""
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
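# Illustrative sketch (an assumption, not original code): AdaLayerNorm replaces the
# usual learned affine of LayerNorm with a scale and shift predicted from a timestep
# embedding, so the normalization adapts per diffusion step. Note the timestep is a
# 0-dim tensor here so that torch.chunk(emb, 2) splits the embedding, not a batch axis.
def _demo_ada_layer_norm():
    import torch
    norm = AdaLayerNorm(embedding_dim=8 , num_embeddings=100 )
    out = norm(torch.randn(2 , 5 , 8 ) , torch.tensor(3 ) )
    assert out.shape == (2, 5, 8)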
class AdaLayerNormZero(nn.Module ):
    """simple docstring"""
    def __init__(self , embedding_dim , num_embeddings ):
        """simple docstring"""
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1E-6 )
    def forward(self , x , timestep , class_labels , hidden_dtype=None ):
        """simple docstring"""
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module ):
    """simple docstring"""
    def __init__(self , embedding_dim , out_dim , num_groups , act_fn = None , eps = 1E-5 ):
        """simple docstring"""
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def forward(self , x , emb ):
        """simple docstring"""
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
        return x
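# A small sketch (assumed shapes, not original code): AdaGroupNorm modulates a
# group-normalized feature map with a per-channel scale and shift derived from an
# embedding, broadcast over the spatial dimensions.
def _demo_ada_group_norm():
    import torch
    norm = AdaGroupNorm(embedding_dim=16 , out_dim=8 , num_groups=4 )
    x = torch.randn(2 , 8 , 4 , 4 )   # (batch, channels, height, width)
    emb = torch.randn(2 , 16 )
    assert norm(x , emb ).shape == (2, 8, 4, 4)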
| 25 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when an image is saved and reloaded
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """simple docstring"""
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self : Any ):
'''simple docstring'''
return self.pa_type
    def encode_example( self , value : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        '''simple docstring'''
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def decode_example( self , value : dict , token_per_repo_id : Optional[dict]=None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}." )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("::" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , "rb" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
    def flatten( self ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage( self , storage : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        '''simple docstring'''
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage : pa.StructArray ):
        '''simple docstring'''
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats() -> List[str]:
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image : "PIL.Image.Image" ) -> bytes:
    """simple docstring"""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image : "PIL.Image.Image" ) -> dict:
    """simple docstring"""
    if hasattr(image , "filename" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array : np.ndarray ) -> dict:
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objs_to_list_of_image_dicts(
    objs : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
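# End-to-end sketch (an assumption about typical usage, not part of the file):
# encoding a NumPy array or a PIL image yields the {"bytes", "path"} struct that
# the Image feature stores in Arrow, and decode_example turns it back into a
# PIL.Image.Image.
def _demo_image_feature_round_trip():
    import PIL.Image
    feature = Image()
    encoded = feature.encode_example(np.zeros((4, 4, 3) , dtype=np.uint8 ) )
    decoded = feature.decode_example(encoded )
    assert isinstance(decoded , PIL.Image.Image ) and decoded.size == (4, 4)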
| 184 | 0 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost(features: np.ndarray , target: np.ndarray , test_features: np.ndarray ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main() -> None:
    data = fetch_california_housing()
    features , target = data_handling(data )
    x_train , x_test , y_train , y_test = train_test_split(
        features , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
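# A small follow-up sketch (metric choice assumed, not in the original script):
# recent sklearn versions can report RMSE directly by passing squared=False to
# mean_squared_error, which is often easier to read than raw MSE.
def _demo_rmse(y_true: np.ndarray , y_pred: np.ndarray ) -> float:
    return mean_squared_error(y_true , y_pred , squared=False )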
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
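# Hedged sketch (not part of the module) of how the lazy pattern defers imports:
# accessing an attribute on the _LazyModule triggers the real submodule import,
# while merely importing the package does not touch torch/tf/flax.
#
#     import importlib
#     mod = importlib.import_module(__name__)                  # cheap: no backend imports yet
#     model_cls = getattr(mod, "VisionTextDualEncoderModel")   # torch is imported here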
| 11 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig( PretrainedConfig ):
    model_type = """blenderbot-small"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self, vocab_size=5_02_65, max_position_embeddings=5_12, encoder_layers=8, encoder_ffn_dim=20_48, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=20_48, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=5_12, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.0_2, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
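# Hedged usage sketch (not in the original file): the attribute_map above lets the
# generic config names resolve to the BlenderbotSmall-specific ones.
#
#     config = BlenderbotSmallConfig()
#     assert config.hidden_size == config.d_model == 512
#     assert config.num_attention_heads == config.encoder_attention_heads == 16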
class BlenderbotSmallOnnxConfig( OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F'''past_key_values.{i}.key'''] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[F'''past_key_values.{i}.value'''] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ])
        return common_inputs
    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F'''present.{i}.key'''] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[F'''present.{i}.value'''] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_key_values inputs without PyTorch installed.' )
            else:
                import torch
            batch , encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_key_values inputs without PyTorch installed.' )
            else:
                import torch
            batch , seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self , flattened_output , name , idx , t ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
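# Hedged export sketch (entry point assumed from transformers' ONNX utilities, not
# part of this config file): the config above plugs into the exporter roughly so.
#
#     from pathlib import Path
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#     from transformers.onnx import export
#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
#     onnx_config = BlenderbotSmallOnnxConfig(model.config, task="default")
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))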
| 21 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase ):
    """simple docstring"""
    def test_flatten_dict(self ):
        '''simple docstring'''
        input_dict = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        expected_dict = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
    def test_transpose_numpy(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
    @require_torch
    def test_transpose_torch(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def test_transpose_tf(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_flax
    def test_transpose_flax(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ) , np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , np.asarray(transpose(t , axes=(1, 2, 0) ) ) ) )
    def test_reshape_numpy(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.reshape(x , (4, 3) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.reshape(x , (12, 5) ) ) )
    @require_torch
    def test_reshape_torch(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )
    @require_tf
    def test_reshape_tf(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )
    @require_flax
    def test_reshape_flax(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.asarray(reshape(t , (4, 3) ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.asarray(reshape(t , (12, 5) ) ) ) )
    def test_squeeze_numpy(self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(x ) , np.squeeze(x ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.squeeze(x , axis=2 ) ) )
    @require_torch
    def test_squeeze_torch(self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
    @require_tf
    def test_squeeze_tf(self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
    @require_flax
    def test_squeeze_flax(self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x ) , np.asarray(squeeze(t ) ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.asarray(squeeze(t , axis=2 ) ) ) )
    def test_expand_dims_numpy(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.expand_dims(x , axis=1 ) ) )
    @require_torch
    def test_expand_dims_torch(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
    @require_tf
    def test_expand_dims_tf(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
    @require_flax
    def test_expand_dims_flax(self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.asarray(expand_dims(t , axis=1 ) ) ) )
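# A minimal sketch (an assumption about the wrappers under test, not original code):
# the framework-agnostic helpers dispatch on the input type, e.g. a simplified
# transpose could look like this.
def _sketch_transpose(array , axes=None ):
    if isinstance(array , np.ndarray ):
        return np.transpose(array , axes=axes )
    if is_torch_available():
        import torch
        if isinstance(array , torch.Tensor ):
            return array.T if axes is None else array.permute(*axes )
    raise ValueError(f"Type not supported for transpose: {type(array )}." )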
| 90 | 0 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 2_9_9_7_9_2_4_5_8
# Symbols
ct, x, y, z = symbols('''ct x y z''')
def beta(velocity: float ) -> float:
    '''simple docstring'''
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""" )
    return velocity / c
def gamma(velocity: float ) -> float:
    '''simple docstring'''
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix(velocity: float ) -> np.ndarray:
    '''simple docstring'''
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity: float , event: np.ndarray = None ) -> np.ndarray:
    '''simple docstring'''
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
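# Worked check (illustrative, not part of the original): at v = 0.9c the Lorentz
# factor is 1 / sqrt(1 - 0.81) ~= 2.294, and boosting the unit time event shows
# time dilation: ct' = gamma * ct.
def _demo_boost():
    velocity = 0.9 * c
    assert abs(gamma(velocity ) - 2.294 ) < 1E-3
    boosted = transform(velocity , np.array([1.0, 0.0, 0.0, 0.0] ) )
    assert np.isclose(boosted[0] , gamma(velocity ) * c )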
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(2_9_9_7_9_2_4_5)
print('''Example of four vector: ''')
print(F'ct\' = {four_vector[0]}')
print(F'x\' = {four_vector[1]}')
print(F'y\' = {four_vector[2]}')
print(F'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'\n{numerical_vector}')
| 369 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int ) -> list[int]:
    '''simple docstring'''
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate(n: int ) -> bool:
    '''simple docstring'''
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes(count: int = 11 ) -> list[int]:
    '''simple docstring'''
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution() -> int:
    '''simple docstring'''
    return sum(compute_truncated_primes(11 ) )
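# Illustrative check (known Project Euler 37 facts, stated for orientation): 3797
# stays prime while truncating from either side (3797, 797, 97, 7 and 379, 37, 3),
# and the eleven such primes sum to 748317.
def _demo_truncatable():
    assert 3797 in compute_truncated_primes(11 )
    assert solution() == 748317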
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(1_1)) = }')
| 248 | 0 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys , default ):
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
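# Hedged usage sketch (the env-var name is made up for illustration): strtobool
# accepts "y", "yes", "t", "true", "on" and "1" as truthy, so flags can be set
# loosely from the shell.
#
#     os.environ["MY_TOOL_DEBUG"] = "yes"
#     assert parse_flag_from_env("MY_TOOL_DEBUG", default=False) is True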
| 34 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A =logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@dataclass
class _a :
__a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _a :
__a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
__a : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
__a : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__a : bool = field(
default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
writer.write('''%s = %s\n''' % (key, value) )
        results.update(result )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
main()
if __name__ == "__main__":
main()
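# Hedged invocation sketch (flag names taken from the dataclasses above; the task
# name and paths are placeholders):
#
#     python run_multiple_choice.py \
#         --task_name swag --data_dir ./data --model_name_or_path bert-base-uncased \
#         --output_dir ./out --max_seq_length 128 --do_train --do_eval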
| 34 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    """Return True if `name` matches any pattern in `ignore_keys` (supports trailing and infix wildcards)."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
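# A hypothetical invocation of this conversion script (all paths are
# placeholders, not real checkpoints):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_converted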
| 320 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
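# Usage sketch (illustrative; "cat.png" is a placeholder file name and the
# class name above is reconstructed from the CLIP-style imports):
#
#   from PIL import Image
#   image_processor = CLIPImageProcessor()
#   inputs = image_processor(images=Image.open("cat.png"), return_tensors="np")
#   inputs["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above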
| 320 | 1 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search over the already-sorted prefix for val's position.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift larger elements one slot to the right and insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
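# Quick sanity check (illustrative):
#   >>> binary_insertion_sort([5, 2, 4, 6, 1, 3])
#   [1, 2, 3, 4, 5, 6]
#   >>> binary_insertion_sort([])
#   []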
| 317 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is kept in the asdict output so the template round-trips through JSON.
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
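# Illustrative only -- aligning a dataset whose columns use other names
# (the column names "q" and "passage" are hypothetical):
#
#   task = QuestionAnsweringExtractive(question_column="q", context_column="passage")
#   task.column_mapping  # {"q": "question", "passage": "context", "answers": "answers"}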
| 317 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
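# Usage sketch (the checkpoint name is the public OWL-ViT checkpoint; `image`
# is a placeholder PIL image loaded elsewhere):
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   sorted(inputs.keys())  # ["attention_mask", "input_ids", "pixel_values"]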
| 369 |
from ..utils import DummyObject, requires_backends
class A__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 307 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
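# The tool under test can also be used directly, outside the test harness
# (sketch; the underlying model is downloaded on first use):
#
#   classifier = load_tool("text-classification")
#   classifier.setup()
#   classifier("That's quite cool", ["positive", "negative"])  # -> "positive"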
| 135 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or to PascalCase if `use_pascal` is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
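# Examples (illustrative):
#   snake_to_camel_case("some_random_string")                   -> "someRandomString"
#   snake_to_camel_case("some_random_string", use_pascal=True)  -> "SomeRandomString"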
| 205 | 0 |
"""simple docstring"""
import numpy as np
class UpperCAmelCase_ :
def __init__( self : List[str] ) -> Optional[int]:
_UpperCamelCase = (0, 0)
_UpperCamelCase = None
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = 0
def __eq__( self : List[str] , __UpperCamelCase : List[Any] ) -> Optional[int]:
return self.position == cell.position
def _UpperCamelCase ( self : Optional[int] ) -> Tuple:
print(self.position )
class UpperCAmelCase_ :
def __init__( self : Any , __UpperCamelCase : Union[str, Any]=(5, 5) ) -> Optional[int]:
_UpperCamelCase = np.zeros(__UpperCamelCase )
_UpperCamelCase = world_size[0]
_UpperCamelCase = world_size[1]
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
print(self.w )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Any ) -> Dict:
_UpperCamelCase = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
_UpperCamelCase = cell.position[0]
_UpperCamelCase = cell.position[1]
_UpperCamelCase = []
for n in neughbour_cord:
_UpperCamelCase = current_x + n[0]
_UpperCamelCase = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
_UpperCamelCase = Cell()
_UpperCamelCase = (x, y)
_UpperCamelCase = cell
neighbours.append(__UpperCamelCase )
return neighbours
def lowercase ( a__ : Dict , a__ : List[Any] , a__ : List[Any] ) -> List[str]:
_UpperCamelCase = []
_UpperCamelCase = []
_open.append(a__ )
while _open:
_UpperCamelCase = np.argmin([n.f for n in _open] )
_UpperCamelCase = _open[min_f]
_closed.append(_open.pop(a__ ) )
if current == goal:
break
for n in world.get_neigbours(a__ ):
for c in _closed:
if c == n:
continue
_UpperCamelCase = current.g + 1
_UpperCamelCase , _UpperCamelCase = n.position
_UpperCamelCase , _UpperCamelCase = goal.position
_UpperCamelCase = (ya - ya) ** 2 + (xa - xa) ** 2
_UpperCamelCase = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(a__ )
_UpperCamelCase = []
while current.parent is not None:
path.append(current.position )
_UpperCamelCase = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
UpperCAmelCase = Gridworld()
# Start position and goal
UpperCAmelCase = Cell()
UpperCAmelCase = (0, 0)
UpperCAmelCase = Cell()
UpperCAmelCase = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
UpperCAmelCase = astar(world, start, goal)
# Just for visual reasons.
for i in s:
UpperCAmelCase = 1
print(world.w)
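# Design note: the frontier above is rescanned with np.argmin on every
# iteration, which costs O(len(_open)) per expansion. A heap-based frontier
# (a sketch, not part of the original script) pops the best node in
# O(log n); the tie-breaking counter keeps Cell objects out of the tuple
# comparison, since Cell only defines __eq__.
import heapq
from itertools import count

_tiebreak = count()


def push(frontier, cell):
    # Store (f, tie-breaker, cell) so heapq never compares two Cells directly.
    heapq.heappush(frontier, (cell.f, next(_tiebreak), cell))


def pop_best(frontier):
    return heapq.heappop(frontier)[2]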
| 366 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
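# These tests are normally driven by pytest; an illustrative invocation
# (the path mirrors the transformers repository layout):
#   python -m pytest tests/models/roformer/test_modeling_flax_roformer.py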
| 54 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
snake_case_ = title
snake_case_ = doc_test_results["""time_spent"""].split("," )[0]
snake_case_ = doc_test_results["""success"""]
snake_case_ = doc_test_results["""failures"""]
snake_case_ = self.n_success + self.n_failures
# Failures and success of the modeling tests
snake_case_ = doc_test_results
@property
def lowerCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
snake_case_ = [self._time_spent]
snake_case_ = 0
for time in time_spent:
snake_case_ = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(UpperCAmelCase__ ) == 1:
snake_case_ = [0, 0, time_parts[0]]
snake_case_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds
snake_case_ = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
return F'''{int(UpperCAmelCase__ )}h{int(UpperCAmelCase__ )}m{int(UpperCAmelCase__ )}s'''
@property
def lowerCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''๐ There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def lowerCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def lowerCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
snake_case_ = 4_0
snake_case_ = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )}
snake_case_ = """"""
for category, failures in category_failures.items():
if len(UpperCAmelCase__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(UpperCAmelCase__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def lowerCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
snake_case_ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(UpperCAmelCase__ )
@staticmethod
def lowerCAmelCase__ ( ) -> List[str]:
"""simple docstring"""
snake_case_ = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(UpperCAmelCase__ )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
snake_case_ = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
snake_case_ = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = """"""
for key, value in failures.items():
snake_case_ = value[:2_0_0] + """ [Truncated]""" if len(UpperCAmelCase__ ) > 2_5_0 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
snake_case_ = job_name
snake_case_ = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
snake_case_ = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowerCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
snake_case_ = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
snake_case_ = sorted(self.doc_test_results.items() , key=lambda _lowerCAmelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
snake_case_ = F'''*Num failures* :{len(job_result['failed'] )} \n'''
snake_case_ = job_result["""failures"""]
snake_case_ = self.get_reply_blocks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , text=UpperCAmelCase__ )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'''Results for {job}''' , blocks=UpperCAmelCase__ , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
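# Example of what `handle_test_results` parses (the input string mimics a
# pytest summary line):
#   handle_test_results("=== 2 failed, 98 passed in 1h23m ===")
#   # -> (2, 98, "1h23m")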
| 159 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 294 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
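# With the lazy module in place, an eager-looking import still resolves and
# only pulls in the heavy backend on first attribute access (sketch):
#   from transformers.models.encoder_decoder import EncoderDecoderConfig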
| 81 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*args: Any, **kwargs: Any) -> None:
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace) -> "ServeCommand":
    """Factory function used to instantiate the serving server from the provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose the model configuration."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenization result, optionally with the token ids."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """De-tokenization result."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward-pass result."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int) -> None:
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self) -> None:
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self) -> ServeModelInfoResult:
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(
        self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)
    ) -> ServeTokenizeResult:
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ) -> ServeDeTokenizeResult:
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)) -> ServeForwardResult:
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
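

# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, assuming the package is installed with the [serving] extra
# so FastAPI and uvicorn are available, and that the CLI entry point is named
# `transformers-cli`. The JSON field names mirror the endpoint definitions
# above; treat them as assumptions if your version differs.
#
#   transformers-cli serve --task feature-extraction --host localhost --port 8888
#
# Once running, the REST endpoints can be exercised with any HTTP client:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8888/tokenize",
#       json={"text_input": "Hello world", "return_ids": True},
#   )
#   print(resp.json())  # {"tokens": [...], "tokens_ids": [...]}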
def solution(n: int = 1_000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing-degree order) at x, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial at x with Horner's rule, using only n multiplications."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
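

# A minimal cross-check (not in the original file): Horner's rule computes
# c0 + x*(c1 + x*(c2 + ...)), so both evaluators should agree on the same
# coefficients up to floating-point rounding. Inputs here are hypothetical.
if __name__ == "__main__":
    import random

    coeffs = [random.uniform(-1.0, 1.0) for _ in range(8)]
    point = random.uniform(-2.0, 2.0)
    assert abs(evaluate_poly(coeffs, point) - horner(coeffs, point)) < 1e-9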
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key: str, offset: int, original_name: str, new_name: str) -> str:
    """Renumber a block (shifted by `offset`) and rename `original_name` to `new_name` inside a checkpoint key."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    return key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
def rename_keys(state_dict: dict) -> dict:
    """Map original PoolFormer checkpoint keys onto the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
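

# For illustration (a hypothetical key, not from a real checkpoint), a
# first-stage MLP weight flows through rename_keys roughly as:
#   "network.0.0.mlp.fc1.weight"
#       -> "poolformer.encoder.0.0.mlp.fc1.weight"             (prefix swap)
#       -> "poolformer.encoder.block.0.0.output.conv1.weight"  (block renumber)
# with the block offset depending on how many patch-embedding biases were
# seen before the key.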
def prepare_img() -> Image.Image:
    """Load the standard COCO image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
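
# Example invocation (hypothetical paths; the script name follows the usual
# transformers conversion-script convention):
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12-hf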
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list implementation of a graph, directed by default."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex and destination_vertex, creating either vertex on demand."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
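

# A small usage sketch (hypothetical vertices, not part of the original file):
if __name__ == "__main__":
    directed_graph = GraphAdjacencyList[int]()
    directed_graph.add_edge(0, 1).add_edge(1, 2).add_edge(2, 0)
    print(directed_graph)  # {0: [1], 1: [2], 2: [0]}

    undirected_graph = GraphAdjacencyList[str](directed=False)
    undirected_graph.add_edge("a", "b")
    print(undirected_graph)  # {'a': ['b'], 'b': ['a']}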
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [False for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b], pushing pending lazy writes down as needed."""
        if self.flag[idx]:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b]."""
        if self.flag[idx]:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))  # 7
    print(segt.query(1, 1, size, 7, 11))  # 14
    print(segt.query(1, 1, size, 7, 12))  # 15
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))  # 111
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
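

# Note (added for clarity, not part of the original demo): `update` performs a
# range *assignment* -- every position in [a, b] is overwritten with `val` --
# so a pending write can be stored whole in `lazy` and pushed one level down
# on the next visit. Both `update` and `query` therefore run in O(log n).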
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
) -> torch.Tensor:
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Heun's second-order sampler for the discrete-sigma formulation (Algorithm 2 of Karras et al., 2022).
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022): sigma^(1/rho) is
        interpolated linearly between sigma_max and sigma_min, which for rho > 1
        concentrates sampling steps near the low-noise end."""

        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
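

# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, with a hypothetical `model` standing in for a real
# denoiser; the tensor shape and step count are arbitrary. Each Heun step
# consumes two of the duplicated timesteps produced by set_timesteps.
#
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)  # hypothetical denoiser call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample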
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # masked region (reconstructed; the exact slice was lost in the original)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
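

# A hypothetical way to run the fast tests above (the path assumes the usual
# diffusers test layout; adjust it to your checkout):
#
#   python -m pytest tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py -k "FastTests"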
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # NOTE: max_relative_position is reconstructed from the defaults; the
        # tester accepts it but does not store it as an attribute.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_lowerCamelCase : List[str] = """\
"""
_lowerCamelCase : List[Any] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_lowerCamelCase : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    """Stand-in for requests.request so no real network call is made."""
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 230 | 0 |
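# The tests above exercise datasets' DownloadManager end to end. A minimal
# sketch of typical standalone usage (the URL is illustrative):
from datasets import DownloadConfig, DownloadManager

def fetch_and_extract(url: str, cache_dir: str = "./hf_cache") -> str:
    dl_manager = DownloadManager(download_config=DownloadConfig(cache_dir=cache_dir))
    # download() caches the file and returns its local path;
    # download_and_extract() additionally unpacks archives.
    return dl_manager.download_and_extract(url)

# Example: local_path = fetch_and_extract("https://example.com/data.tar.gz")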
"""simple docstring"""
from manim import *
class Stage3(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 357 |
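# A scene like the one above is normally rendered with the manim CLI, but the
# Python API can also be driven directly. A minimal sketch, assuming manim
# (Community Edition) is installed and the class name is illustrative:
from manim import Circle, Create, Scene, config

class Demo(Scene):
    def construct(self):
        self.play(Create(Circle()))

if __name__ == "__main__":
    config.quality = "low_quality"  # faster preview renders
    Demo().render()  # writes the video under ./media/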
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 341 | 0 |
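# The copy() above deep-copies every field so a caller can tweak one download
# configuration without mutating shared state. The same pattern works for any
# dataclass; a small illustrative sketch:
import copy
from dataclasses import dataclass

@dataclass
class RetryPolicy:
    max_retries: int = 1
    backoff_seconds: float = 0.5

    def copy(self) -> "RetryPolicy":
        # Rebuild the instance from a deep copy of its own fields.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

base = RetryPolicy()
aggressive = base.copy()
aggressive.max_retries = 5  # base is untouched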
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target arrays
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 1 |
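# Greedy ordering by value/weight ratio is provably optimal for the fractional
# knapsack (unlike the 0/1 variant, which needs dynamic programming).
# A quick worked example with a binding capacity:
best, taken = fractional_knapsack(value=[60, 100, 120], weight=[10, 20, 30], capacity=50)
print(best)   # 240.0 -> items 1 and 2 taken in full, plus 2/3 of item 3
print(taken)  # [1, 1, 0.6666666666666666]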
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count integer-sided right triangles by perimeter, up to max_perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Project Euler 39: the perimeter <= n with the most right-triangle solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 365 |
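# For p = 120 there are exactly three solutions: (20, 48, 52), (24, 45, 51)
# and (30, 40, 50), which is easy to verify with the counter built above:
counts = pythagorean_triple(120)
print(counts[120])  # 3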
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 325 | 0 |
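# Typical use of the processor above: bundle an image and an instruction into
# model-ready tensors. A minimal sketch (the checkpoint name is illustrative
# and assumes the InstructBLIP weights are available on the Hub):
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.open("photo.jpg").convert("RGB")
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# inputs now holds pixel_values, input_ids/attention_mask for the language
# model, and qformer_input_ids/qformer_attention_mask for the Q-Former.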
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 280 |
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
| 280 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,  # 288 + 32
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 357 |
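# Instantiating the config (and sizing it down for experiments) follows the
# standard transformers pattern; a minimal sketch:
from transformers import RealmConfig

config = RealmConfig()  # defaults in the style of the google/realm-* checkpoints
small = RealmConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024)
print(small.hidden_size, small.num_hidden_layers)  # 256 4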
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 314 | 0 |
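# The head-plus-threshold pattern above is just a per-image binary classifier
# on CLIP embeddings. A tiny standalone illustration of the same flagging step:
import torch

scores = torch.tensor([0.12, 0.87, 0.49])  # hypothetical p_head outputs
flagged = (scores > 0.5).tolist()
print(flagged)  # [False, True, False] -> only the second image would be blacked out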
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 140 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 140 | 1 |
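# Separate chaining in a nutshell: each bucket holds a deque of colliding
# entries instead of a single value. A dependency-free sketch of the idea:
from collections import deque

buckets = [None] * 8

def chained_insert(key: int, value: str) -> None:
    idx = hash(key) % len(buckets)
    if buckets[idx] is None:
        buckets[idx] = deque()
    buckets[idx].appendleft((key, value))  # newest entry first, as above

chained_insert(1, "a")
chained_insert(9, "b")  # 9 % 8 == 1 -> collides with key 1, chained in bucket 1
print(buckets[1])       # deque([(9, 'b'), (1, 'a')])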
"""simple docstring"""
from collections import deque
class __a :
"""simple docstring"""
def __init__( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
UpperCamelCase__ : Any =process_name # process name
UpperCamelCase__ : str =arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCamelCase__ : List[Any] =arrival_time
UpperCamelCase__ : int =burst_time # remaining burst time
UpperCamelCase__ : Any =0 # total time of the process wait in ready queue
UpperCamelCase__ : Dict =0 # time from arrival time to completion time
class __a :
"""simple docstring"""
def __init__( self : Any , lowercase_ : int , lowercase_ : list[int] , lowercase_ : deque[Process] , lowercase_ : int , ):
# total number of mlfq's queues
UpperCamelCase__ : Optional[Any] =number_of_queues
# time slice of queues that round robin algorithm applied
UpperCamelCase__ : Dict =time_slices
# unfinished process is in this ready_queue
UpperCamelCase__ : Optional[int] =queue
# current time
UpperCamelCase__ : Tuple =current_time
# finished process is in this sequence queue
UpperCamelCase__ : deque[Process] =deque()
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : int =[]
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _lowerCAmelCase ( self : Dict , lowercase_ : list[Process] ):
UpperCamelCase__ : List[str] =[]
for i in range(len(lowercase_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _lowerCAmelCase ( self : List[Any] , lowercase_ : list[Process] ):
UpperCamelCase__ : Optional[int] =[]
for i in range(len(lowercase_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _lowerCAmelCase ( self : str , lowercase_ : list[Process] ):
UpperCamelCase__ : int =[]
for i in range(len(lowercase_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _lowerCAmelCase ( self : Tuple , lowercase_ : deque[Process] ):
return [q.burst_time for q in queue]
def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _lowerCAmelCase ( self : Dict , lowercase_ : deque[Process] ):
UpperCamelCase__ : deque[Process] =deque() # sequence deque of finished process
while len(lowercase_ ) != 0:
UpperCamelCase__ : str =ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowercase_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCamelCase__ : Any =0
# set the process's turnaround time because it is finished
UpperCamelCase__ : int =self.current_time - cp.arrival_time
# set the completion time
UpperCamelCase__ : Union[str, Any] =self.current_time
# add the process to queue that has finished queue
finished.append(lowercase_ )
self.finish_queue.extend(lowercase_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _lowerCAmelCase ( self : Tuple , lowercase_ : deque[Process] , lowercase_ : int ):
UpperCamelCase__ : deque[Process] =deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowercase_ ) ):
UpperCamelCase__ : Dict =ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowercase_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCamelCase__ : Optional[Any] =self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowercase_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCamelCase__ : List[str] =0
# set the finish time
UpperCamelCase__ : Optional[int] =self.current_time
# update the process' turnaround time because it is finished
UpperCamelCase__ : Tuple =self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowercase_ )
self.finish_queue.extend(lowercase_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _lowerCAmelCase ( self : Any ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
UpperCamelCase__ : List[str] =self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_SCREAMING_SNAKE_CASE : List[Any] = Process("""P1""", 0, 5_3)
_SCREAMING_SNAKE_CASE : Dict = Process("""P2""", 0, 1_7)
_SCREAMING_SNAKE_CASE : int = Process("""P3""", 0, 6_8)
_SCREAMING_SNAKE_CASE : str = Process("""P4""", 0, 2_4)
_SCREAMING_SNAKE_CASE : Tuple = 3
_SCREAMING_SNAKE_CASE : Any = [1_7, 2_5]
_SCREAMING_SNAKE_CASE : Optional[int] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
_SCREAMING_SNAKE_CASE : int = Process("""P1""", 0, 5_3)
_SCREAMING_SNAKE_CASE : Optional[Any] = Process("""P2""", 0, 1_7)
_SCREAMING_SNAKE_CASE : Tuple = Process("""P3""", 0, 6_8)
_SCREAMING_SNAKE_CASE : Union[str, Any] = Process("""P4""", 0, 2_4)
_SCREAMING_SNAKE_CASE : Dict = 3
_SCREAMING_SNAKE_CASE : int = [1_7, 2_5]
_SCREAMING_SNAKE_CASE : Dict = deque([Pa, Pa, Pa, Pa])
_SCREAMING_SNAKE_CASE : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
_SCREAMING_SNAKE_CASE : Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 368 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 157 | 0 |