| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
"""Boyer-Moore string search using the bad-character heuristic."""
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatching character for the
        alignment starting at `current_pos`, or -1 if the whole window matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return every position in the text where the pattern occurs."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # Rebinding the loop variable of a `for` loop does not affect the
                # next iteration, so this shift is computed but the scan still
                # advances one position at a time.
                i = mismatch_index - match_index  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
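
# Added sanity check (not part of the original demo): for text "ABAABA" and
# pattern "AB" the only occurrences start at indices 0 and 3.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]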
"""Tests for the MarianTokenizer."""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Token "</s>" maps to id 0 and back."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
"""Tests for the CMStochasticIterativeScheduler."""
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
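

# Added usage sketch (hedged; it only reuses the diffusers API exercised above):
# the multistep test corresponds to a denoising loop like this, with a real
# consistency model in place of the dummy one:
#
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(timesteps=[106, 0])
#   sample = noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       residual = model(scheduler.scale_model_input(sample, t), t)
#       sample = scheduler.step(residual, t, sample).prev_sample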
"""Tarjan's algorithm for finding strongly connected components."""
from __future__ import annotations

from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of the directed graph `g`
    (given as an adjacency list), in reverse topological order."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
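
    # Added illustration: tarjan emits components in reverse topological order,
    # so the sink component [5] comes first for this graph.
    print(tarjan(g))  # [[5], [6], [4], [3, 2, 1, 0]]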
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _snake_case :
def __init__( self : int , UpperCAmelCase : str , UpperCAmelCase : int=13 , UpperCAmelCase : List[Any]=30 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=32 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[Any]=37 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : List[Any]=0.0_2 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : str=None , UpperCAmelCase : List[Any]=2 , ):
__lowerCamelCase : Union[str, Any] = parent
__lowerCamelCase : Union[str, Any] = batch_size
__lowerCamelCase : Any = image_size
__lowerCamelCase : Optional[int] = patch_size
__lowerCamelCase : List[str] = num_channels
__lowerCamelCase : Dict = is_training
__lowerCamelCase : Any = use_labels
__lowerCamelCase : Tuple = hidden_size
__lowerCamelCase : Optional[Any] = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : Any = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : List[Any] = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : List[str] = type_sequence_label_size
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : Dict = scope
__lowerCamelCase : Optional[int] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCamelCase : str = (image_size // patch_size) ** 2
__lowerCamelCase : Dict = num_patches + 2
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : List[Any] = None
if self.use_labels:
__lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int ):
__lowerCamelCase : Optional[int] = TFDeiTModel(config=UpperCAmelCase )
__lowerCamelCase : List[Any] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(config=UpperCAmelCase )
__lowerCamelCase : str = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCamelCase : List[str] = 1
__lowerCamelCase : Dict = TFDeiTForMaskedImageModeling(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : Any = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
__lowerCamelCase : Optional[Any] = self.type_sequence_label_size
__lowerCamelCase : str = TFDeiTForImageClassification(UpperCAmelCase )
__lowerCamelCase : List[Any] = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Optional[Any] = TFDeiTForImageClassification(UpperCAmelCase )
__lowerCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Dict = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs
__lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _snake_case ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Tuple = TFDeiTModelTester(self )
__lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCamelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , tf.keras.layers.Dense ) )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Union[str, Any] = model_class(UpperCAmelCase )
__lowerCamelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]
__lowerCamelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any]=False ):
__lowerCamelCase : Optional[int] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Optional[Any] = TFDeiTModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Optional[int] ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : Tuple = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
__lowerCamelCase : Tuple = self.default_image_processor
__lowerCamelCase : Union[str, Any] = prepare_img()
__lowerCamelCase : Union[str, Any] = image_processor(images=UpperCAmelCase , return_tensors="tf" )
# forward pass
__lowerCamelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify the logits
__lowerCamelCase : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
__lowerCamelCase : List[str] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) ) | 646 | """simple docstring"""
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)

        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
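
# Added usage note: this script is meant to be run under the accelerate launcher
# across several processes, e.g. (assuming the file is saved as test_ops.py):
#
#   accelerate launch --num_processes 2 test_ops.py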
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
UpperCamelCase = (
subprocess.check_output(F'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()
)
UpperCamelCase = "|".join(sys.argv[1:])
UpperCamelCase = re.compile(RF'^({joined_dirs}).*?\.py$')
UpperCamelCase = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
from __future__ import annotations

speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert `speed` from `unit_from` to `unit_to`, rounded to 3 decimals."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
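
# Added example, checkable against the tables above:
# 100 km/h -> m/s is 100 * 1.0 * 0.277777778 = 27.778 (rounded to 3 places).
assert convert_speed(100, "km/h", "m/s") == 27.778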
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a= re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a= 1_0
a= 2_5_6
def _UpperCamelCase ( _a : List[str] ):
"""simple docstring"""
if len(_a ) < MIN_NUM_TOKENS:
return None
__UpperCamelCase : Optional[Any] = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
def _UpperCamelCase ( _a : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
class __lowercase :
"""simple docstring"""
def __init__( self , *,
_lowerCamelCase = 0.8_5 , ):
__UpperCamelCase : List[Any] = duplication_jaccard_threshold
__UpperCamelCase : List[str] = NUM_PERM
__UpperCamelCase : str = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__UpperCamelCase : Dict = defaultdict(_lowerCamelCase )
def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase : Optional[Any] = self._index.query(_lowerCamelCase )
if code_key in self._index.keys:
print(f"""Duplicate key {code_key}""" )
return
self._index.insert(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_lowerCamelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_lowerCamelCase )
def lowerCAmelCase ( self ):
__UpperCamelCase : Optional[Any] = []
for base, duplicates in self._duplicate_clusters.items():
__UpperCamelCase : Union[str, Any] = [base] + list(_lowerCamelCase )
# reformat the cluster to be a list of dict
__UpperCamelCase : List[str] = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(_lowerCamelCase )
return duplicate_clusters
def lowerCAmelCase ( self , _lowerCamelCase ):
__UpperCamelCase : List[str] = self.get_duplicate_clusters()
with open(_lowerCamelCase , 'w' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase ( _a : List[str] ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Optional[Any] = element
__UpperCamelCase : List[str] = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _UpperCamelCase ( _a : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def _UpperCamelCase ( _a : Type[Dataset] , _a : float ):
"""simple docstring"""
__UpperCamelCase : List[str] = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=1_0_0 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _UpperCamelCase ( _a : str , _a : str ):
"""simple docstring"""
__UpperCamelCase : List[str] = get_tokens(_a )
__UpperCamelCase : str = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
a= None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-duplicates inside a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Reduce every duplicate cluster to its extremes, sharing the dataset with the worker processes."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate the dataset, keeping one extreme file per cluster of near-duplicates."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
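
# A minimal usage sketch (illustrative; assumes a `datasets.Dataset` named `ds`
# with the "content", "repo_name" and "path" columns that the functions above index):
#
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   print(len(ds), "->", len(ds_dedup))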
| 287 | '''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as a list of integers."""
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as a list of binary strings."""
    # The approach is recursive: for n bits, prefix the (n - 1)-bit sequence with "0",
    # then walk the same sequence in reverse prefixing with "1".
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
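
# Illustrative values (hand-checked, not part of the original module):
# gray_code_sequence_string(2) -> ["00", "01", "11", "10"], so gray_code(2) -> [0, 1, 3, 2];
# each adjacent pair of codes differs in exactly one bit.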
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 1 |
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone)) | 39 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    """Resolves an archive's extraction path in the cache and extracts it on demand."""

    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
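
    # A minimal usage sketch (hypothetical path; assumes the usual `config` cache
    # locations are available):
    #
    #   manager = ExtractManager(cache_dir="/tmp/datasets_cache")
    #   extracted_path = manager.extract("/tmp/archive.tar.gz")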
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Base class for extractors that sniff the archive type from its leading magic number."""

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path: str):
        """Yield only members that can safely be extracted into `output_path` (blocks
        path-traversal names and links that point outside the target directory)."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
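
    # Illustrative dispatch (hypothetical file): a ".tar.gz" archive starts with the
    # gzip magic bytes 0x1F 0x8B, so `infer_extractor_format` below returns "gzip"
    # no matter what the file extension says.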
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path) | 39 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 361 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        init_dict = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**init_dict)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy")

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool", num_images_per_prompt=1, generator=generator, output_type="np", )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 361 | 1 |
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text`: for each character c, draw a random key k and store (c + k) * k."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt by inverting encrypt: ((c + k) * k - k**2) / k recovers c."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
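
# Why the round trip works (a short derivation, not in the original module):
# encrypt stores c = (p + k) * k = p*k + k**2, so (c - k**2) / k = p exactly,
# and chr(p) restores the original character.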
| 290 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by repeatedly halving the interval,
    assuming the function changes sign on it."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
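
# For reference (hand-checked, not part of the original script): the real root of
# x**3 - 2*x - 5 lies near x = 2.0945514815, which is the value
# bisection(f, 1, 1000) should converge to.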
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
| 290 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
@slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) | 402 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
SAMPLE_BPE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')

FRAMEWORK = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = CamembertTokenizer(lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = """<pad>"""
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def lowerCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowerCamelCase ) , 1004 )
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = CamembertTokenizer(lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
_UpperCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_UpperCAmelCase = """I was born in 92000, and this is falsé."""
_UpperCAmelCase = tokenizer.encode(lowerCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = """I was born in 92000, and this is falsé."""
_UpperCAmelCase = tokenizer.tokenize(lowerCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(lowerCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, ) | 402 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
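
    # The schedule above is the variance-exploding one from the score-SDE code this
    # file credits at the top: sigma(t) = sigma_min * (sigma_max / sigma_min) ** t,
    # so the per-step diffusion coefficient used in `step_pred` below is
    # (sigma_t**2 - sigma_{t-1}**2) ** 0.5.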
    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 697 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
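
# Worked example (hand-checked, not in the original module): for "AB",
# 'B' contributes (66 - 64) * 26**0 = 2 and 'A' contributes (65 - 64) * 26**1 = 26,
# so excel_title_to_column("AB") == 28.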
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 1 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(1_0_0, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
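
# Expected output of the two prints above (hand-checked): 125.0 and 131.775.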
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info of the surrounding git repository to `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }

    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setup, setting the distributed fields on `params`."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
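
# Illustrative call pattern (hypothetical argparse namespace; the env vars WORLD_SIZE,
# N_GPU_NODE, RANK, N_NODES and NODE_RANK are the ones a distributed launcher exports):
#
#   init_gpu_params(args)  # afterwards args.is_master, args.multi_gpu, ... are set
#   set_seed(args)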
def set_seed(args):
    """Seed the numpy, torch and torch.cuda RNGs from args.seed for reproducibility."""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Map a CompVis/LDM VAE state dict onto the diffusers AutoencoderKL layout."""
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
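
# For intuition (illustrative of the renaming rules above): a checkpoint key such as
# "encoder.down.0.block.0.norm1.weight" ends up as
# "encoder.down_blocks.0.resnets.0.norm1.weight" after conversion.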
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str, ):
    # Only support V1 for now
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
_a : Dict = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 598 |
import operator as op
def solve(post_fix):
    """Evaluate a space-separated postfix expression, printing each stack step."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation

    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')

            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')

            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ', )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
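
# Example run (hand-traced, not part of the original script): for the input
# "2 3 4 * +" the table shows push(2), push(3), push(4), then pop(4)/pop(3)/push(3*4),
# then pop(12)/pop(2)/push(2+12), and the final result is 14.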
| 598 | 1 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 711 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
UpperCamelCase ="\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
UpperCamelCase ="\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
UpperCamelCase ="\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
if concatenate_texts:
return jiwer.compute_measures(
__lowerCAmelCase , __lowerCAmelCase , truth_transform=__lowerCAmelCase , hypothesis_transform=__lowerCAmelCase , )["wer"]
UpperCamelCase_ : Optional[Any] = 0
UpperCamelCase_ : str = 0
for prediction, reference in zip(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase_ : Optional[int] = jiwer.compute_measures(
__lowerCAmelCase , __lowerCAmelCase , truth_transform=__lowerCAmelCase , hypothesis_transform=__lowerCAmelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
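
# A hand-rolled sanity check of the CER formula documented above (a minimal
# sketch; `levenshtein` below is written here for illustration and is not part
# of jiwer or datasets). With unit costs, S + D + I equals the character-level
# edit distance, so the docstring example should come out to 14 errors over 41
# reference characters.
if __name__ == "__main__":

    def levenshtein(reference, hypothesis):
        # classic dynamic-programming edit distance over characters
        previous = list(range(len(hypothesis) + 1))
        for i, ref_char in enumerate(reference, 1):
            current = [i]
            for j, hyp_char in enumerate(hypothesis, 1):
                current.append(
                    min(
                        previous[j] + 1,  # deletion
                        current[j - 1] + 1,  # insertion
                        previous[j - 1] + (ref_char != hyp_char),  # substitution
                    )
                )
            previous = current
        return previous[-1]

    references = ["this is the reference", "there is another one"]
    predictions = ["this is the prediction", "there is an other sample"]
    errors = sum(levenshtein(r, p) for r, p in zip(references, predictions))
    total_chars = sum(len(r) for r in references)
    print(errors / total_chars)  # 0.34146341463414637, matching the docstring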
| 543 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing models
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
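
# For reference, real-weight usage of the pipeline under test mirrors the dummy
# inputs above (the checkpoint id and dtype are assumptions; the DeepFloyd
# weights are gated and the call needs a CUDA device):
#   pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(
#       "DeepFloyd/IF-II-L-v1.0", torch_dtype=torch.float16
#   ).to("cuda")
#   result = pipe(
#       prompt="A painting of a squirrel eating a burger",
#       image=image, original_image=original_image, mask_image=mask_image,
#   ).images[0]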
| 210 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
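
# A quick illustration of the resolver above (assumes the relevant model deps
# are installed; otherwise the dummy object from the main init is returned):
#   >>> feature_extractor_class_from_name("ViTFeatureExtractor")
#   <class 'transformers.models.vit.feature_extraction_vit.ViTFeatureExtractor'>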
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
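
# A hedged usage sketch (the checkpoint id is only an example; any repo whose
# preprocessor config or model config carries `feature_extractor_type` works):
#   >>> from transformers import AutoFeatureExtractor
#   >>> extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   >>> type(extractor).__name__
#   'Wav2Vec2FeatureExtractor'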
| 346 | 0 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
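
# Note on the renaming trick used by both converters in this script: zipping
# the two state dicts' keys matches parameters purely by iteration order, so
# it only works because the original diffuser checkpoint and the freshly built
# UNet1DModel enumerate their parameters in the same order; nothing here
# validates names or shapes.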
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 397 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from the PyTorch checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)

    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save the TensorFlow model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
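    # Example invocation (illustrative; the paths are placeholders):
    #   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
    #       --pytorch_checkpoint_path bert-base-cased \
    #       --config_file ./bert_config.json --tf_dump_path ./tf_dump \
    #       --compare_with_pt_model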
| 397 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
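
# A minimal export sketch using the ONNX config above (illustrative; assumes
# the optional `transformers.onnx` dependencies such as `onnx` are installed
# and that "google/bigbird-roberta-base" is reachable):
if __name__ == "__main__":
    from pathlib import Path

    from transformers import AutoTokenizer, BigBirdModel
    from transformers.onnx import export

    checkpoint = "google/bigbird-roberta-base"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = BigBirdModel.from_pretrained(checkpoint)

    onnx_config = BigBirdOnnxConfig(model.config)
    # `export` traces the model, writes model.onnx, and returns the matched
    # input and output names of the exported graph.
    onnx_inputs, onnx_outputs = export(
        tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx")
    )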
| 93 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
def __init__( self , _a , _a=3 , _a=32 , _a=3 , _a=10 , _a=[10, 20, 30, 40] , _a=[1, 1, 2, 1] , _a=True , _a=True , _a="relu" , _a=3 , _a=None , ) -> Union[str, Any]:
_A : List[str] = parent
_A : Optional[int] = batch_size
_A : int = image_size
_A : Optional[Any] = num_channels
_A : Any = embeddings_size
_A : Dict = hidden_sizes
_A : Any = depths
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Tuple = hidden_act
_A : Dict = num_labels
_A : Union[str, Any] = scope
_A : Optional[Any] = len(_a )
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Tuple = ids_tensor([self.batch_size] , self.num_labels )
_A : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : Any = RegNetModel(config=_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : str = self.num_labels
_A : Any = RegNetForImageClassification(_a )
model.to(_a )
model.eval()
_A : str = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self ) -> str:
_A : Union[str, Any] = self.prepare_config_and_inputs()
_A , _A , _A : Tuple = config_and_inputs
_A : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def a__ ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> Optional[Any]:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def a__ ( self ) -> Union[str, Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Dict = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : Dict = [*signature.parameters.keys()]
_A : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> str:
_A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Optional[Any]:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def a__ ( self ) -> Optional[int]:
def check_hidden_states_output(_a , _a , _a ):
_A : str = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : List[str] = model(**self._prepare_for_class(_a , _a ) )
_A : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_A : Union[str, Any] = layer_type
_A : Tuple = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> Optional[int]:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Tuple:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = RegNetModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
_A : Optional[int] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_A : Any = self.default_image_processor
_A : Optional[int] = prepare_img()
_A : Tuple = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Union[str, Any] = model(**_a )
# verify the logits
_A : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : int = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 307 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
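
# With `_LazyModule`, importing this package stays cheap: the torch-backed
# symbols listed in `_import_structure` are only imported on first attribute
# access, e.g. the first time `transformers.models.lilt.LiltModel` is touched.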
| 718 |
"""simple docstring"""
def A__ ( _UpperCAmelCase : int = 1_00_00_00 ) -> int:
'''simple docstring'''
snake_case__ : List[Any] = limit + 1
snake_case__ : Union[str, Any] = [0] * limit
for first_term in range(1 , _UpperCAmelCase ):
for n in range(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case__ : List[Any] = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
snake_case__ : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
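

# Why the divisibility and range checks in `solution` work: write the
# arithmetic progression as x = a + d, y = a, z = a - d (a = first_term).
# Then x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a) = n,
# so a must divide n, 4 * d = a + n / a must be divisible by 4, and positivity
# forces z = a - d > 0 (a > d) and n > 0 (a < 4 * d).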
if __name__ == "__main__":
print(f"{solution() = }")
| 150 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int]=13 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : Tuple=0.2_5 , SCREAMING_SNAKE_CASE_ : Tuple=8 , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Dict=1024 , SCREAMING_SNAKE_CASE_ : Optional[Any]=32 , SCREAMING_SNAKE_CASE_ : Optional[int]="relu6" , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Any=10 , SCREAMING_SNAKE_CASE_ : List[str]=None , ) -> int:
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = min_depth
__snake_case = tf_padding
__snake_case = int(last_hidden_size * depth_multiplier )
__snake_case = output_stride
__snake_case = hidden_act
__snake_case = classifier_dropout_prob
__snake_case = use_labels
__snake_case = is_training
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = scope
def a ( self : int ) -> List[str]:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def a ( self : List[Any] ) -> Tuple:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]:
__snake_case = MobileNetVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
__snake_case = self.num_labels
__snake_case = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
def a ( self : List[Any] ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV1 does not use inputs_embeds' )
def a ( self : Tuple ) -> Dict:
pass
@unittest.skip(reason='MobileNetV1 does not support input and output embeddings' )
def a ( self : Any ) -> Dict:
pass
@unittest.skip(reason='MobileNetV1 does not output attentions' )
def a ( self : Dict ) -> Any:
pass
def a ( self : int ) -> Any:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(SCREAMING_SNAKE_CASE_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> List[str]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> List[str]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
__snake_case = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__snake_case = outputs.hidden_states
__snake_case = 26
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def a ( self : Union[str, Any] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img():
"""simple docstring"""
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
__snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
__snake_case = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
__snake_case = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
__snake_case = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 56 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
'--config_file' , default=lowercase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def _a (lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = get_user_input()
if args.config_file is not None:
__snake_case = args.config_file
else:
if not os.path.isdir(lowercase__ ):
os.makedirs(lowercase__ )
__snake_case = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase__ )
else:
config.to_yaml_file(lowercase__ )
print(f'accelerate configuration saved at {config_file}' )
def _a () -> int:
"""simple docstring"""
__snake_case = config_command_parser()
__snake_case = parser.parse_args()
config_command(lowercase__ )
if __name__ == "__main__":
main()
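
# Typical invocations once `accelerate` is installed (the config-file path is
# just an example):
#   accelerate config
#   accelerate config --config_file ~/my_accelerate_config.yaml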
| 56 | 1 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
            streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
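

# A small usage sketch (assumes a local pyspark session; `Dataset.from_spark`
# is the public entry point that delegates to this reader):
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
    ds = Dataset.from_spark(df)
    print(ds[0])  # {'text': 'hello', 'label': 0}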
| 68 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self , lowercase__ , lowercase__=2 , lowercase__=3 , lowercase__=4 , lowercase__=2 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=36 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=6 , lowercase__=6 , lowercase__=3 , lowercase__=4 , lowercase__=None , lowercase__=1000 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = num_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE_ : str = is_training
SCREAMING_SNAKE_CASE_ : str = use_input_mask
SCREAMING_SNAKE_CASE_ : Any = use_token_type_ids
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
SCREAMING_SNAKE_CASE_ : List[str] = coordinate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = shape_size
SCREAMING_SNAKE_CASE_ : List[str] = num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scope
SCREAMING_SNAKE_CASE_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_seq_length
SCREAMING_SNAKE_CASE_ : Tuple = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
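    # Sketch (added): the coordinate-swapping loop above, as a vectorized standalone
    # helper (`make_bboxes_legal` is a hypothetical name; numpy is imported above
    # as np). It enforces x0 <= x1 and y0 <= y1 for every [x0, y0, x1, y1] box.
    @staticmethod
    def make_bboxes_legal(bbox):
        out = bbox.copy()
        out[..., [0, 2]] = np.sort(out[..., [0, 2]], axis=-1)  # order the x coordinates
        out[..., [1, 3]] = np.sort(out[..., [1, 3]], axis=-1)  # order the y coordinates
        return out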
    def create_and_check_model ( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        """simple docstring"""
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({"pixel_values": pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification ( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification ( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering ( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        """simple docstring"""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip (
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        return True
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
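    # Example (added): what the multiple-choice expansion above does to one tensor.
    # Each per-example tensor gains a num_choices axis so every choice sees a copy:
    #     ids = tf.constant([[1, 2, 3], [4, 5, 6]])          # (batch=2, seq=3)
    #     tf.tile(tf.expand_dims(ids, 1), (1, 4, 1)).shape   # -> (2, 4, 3)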
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def test_config ( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_loss_computation ( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , "hf_compute_loss" , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop("input_ids" )
                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop("input_ids" )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model ( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_model_various_embeddings ( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_for_sequence_classification ( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    def test_for_token_classification ( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
    def test_for_question_answering ( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    @slow
    def test_model_from_pretrained ( self ):
        """simple docstring"""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor ( self ):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def test_inference_no_head ( self ):
        """simple docstring"""
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="tf" ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the last hidden states
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 68 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset ( IterableDataset ):
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        """simple docstring"""
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ):
        """simple docstring"""
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="pt" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria ( StoppingCriteria ):
    def __init__( self , start_length , eof_strings , tokenizer ):
        """simple docstring"""
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self , input_ids , scores , **kwargs ):
        """simple docstring"""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block ( string ):
    """Remove the last block of generated code, starting at the last EOF marker."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code ( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    '''simple docstring'''
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main ( ):
    '''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure the tokenizer plays nicely with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval" )
    code_eval_metric = load_metric("code_eval" )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval["test"] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""] , predictions=[[""]] )
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation." )
        raise exception
    model , human_eval_loader = accelerator.prepare(model , human_eval_loader )
    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k , _ = code_eval_metric.compute(
            references=references , predictions=generations , num_workers=args.num_workers )
        print(f"Results: {pass_at_k}" )
        # Save results to json file
        with open(args.output_file , "w" ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
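# Reference sketch (added, not used by the script above): the unbiased pass@k
# estimator that the `code_eval` metric reports (Chen et al., 2021, "Evaluating
# Large Language Models Trained on Code"), for n samples of which c pass.
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Probability that at least one of k samples passes, given c of n passing."""
    if n - c < k:
        return 1.0
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))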
| 203 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
    # not used in modeling files, but it's important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
    # used during training (although we don't have training scripts for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
    # `ignore_value` used during training (although we don't have training scripts for these models yet)
    # `norm` used in the conversion script (although not used in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used ( config_class , attributes , default_value , source_strings ):
    '''simple docstring'''
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f"getattr(config, \"{attribute}\"" in modeling_source
                or f"getattr(self.config, \"{attribute}\"" in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id" ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
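# Example (added): the multi-line regex above also matches getattr calls that are
# split across several lines ("hidden_size" is an arbitrary attribute name here):
_snippet = 'getattr(\n    self.config,\n    "hidden_size",\n)'
_pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
assert re.search(_pattern, _snippet) is not None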
def check_config_attributes_being_used ( config_class ):
    '''simple docstring'''
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("modeling_" )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes ( ):
    '''simple docstring'''
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 203 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    dataset_name: Optional[str] = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None , metadata={"help": "The column name of the images in the files."} )
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__( self ):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""
    model_name_or_path: str = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: str = field(default=None , metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
    norm_pix_loss: bool = field(
        default=True , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class CustomTrainingArguments ( TrainingArguments ):
    base_learning_rate: float = field(
        default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def collate_fn ( examples ):
    """simple docstring"""
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    return {"pixel_values": pixel_values}
def main ( ):
"""simple docstring"""
a =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , UpperCAmelCase_ , UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a =training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase_ )
transformers.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
a =None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase_ ) and data_args.train_val_split > 0.0:
a =ds["""train"""].train_test_split(data_args.train_val_split )
a =split["""train"""]
a =split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a ={
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
a =ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCAmelCase_ )
elif model_args.model_name_or_path:
a =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase_ )
else:
a =ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
a =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase_ )
elif model_args.model_name_or_path:
a =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase_ )
else:
a =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
a =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
a =ViTMAEForPreTraining(UpperCAmelCase_ )
if training_args.do_train:
a =ds["""train"""].column_names
else:
a =ds["""validation"""].column_names
if data_args.image_column_name is not None:
a =data_args.image_column_name
elif "image" in column_names:
a ="""image"""
elif "img" in column_names:
a ="""img"""
else:
a =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
a =image_processor.size["""shortest_edge"""]
else:
a =(image_processor.size["""height"""], image_processor.size["""width"""])
a =Compose(
[
Lambda(lambda UpperCAmelCase_ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCAmelCase_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCAmelCase_ ):
a =[transforms(UpperCAmelCase_ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
a =ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCAmelCase_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
a =(
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCAmelCase_ )
# Compute absolute learning rate
a =(
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
a =training_args.base_learning_rate * total_train_batch_size / 256
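    # Worked example (added): with base_lr=1e-3, per-device batch size 32, gradient
    # accumulation 2 and 4 processes, total_train_batch_size = 32 * 2 * 4 = 256, so
    # absolute_lr = 1e-3 * 256 / 256 = 1e-3; doubling the batch doubles the rate.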
# Initialize our trainer
a =Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , )
# Training
if training_args.do_train:
a =None
if training_args.resume_from_checkpoint is not None:
a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a =last_checkpoint
a =trainer.train(resume_from_checkpoint=UpperCAmelCase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
a =trainer.evaluate()
trainer.log_metrics("""eval""" , UpperCAmelCase_ )
trainer.save_metrics("""eval""" , UpperCAmelCase_ )
# Write model card and (optionally) push to hub
a ={
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase_ )
else:
trainer.create_model_card(**UpperCAmelCase_ )
def _mp_fn ( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash ( tokens: List[str] )-> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens (None if too short)."""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
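# Example (added): two MinHashes approximate the Jaccard similarity of their token
# sets; here the true Jaccard is |{def, return}| / |{def, foo, bar, return}| = 0.5.
def _min_hash_demo():
    ma = get_min_hash(["def", "foo", "return"] * 4)  # repeat to pass MIN_NUM_TOKENS
    mb = get_min_hash(["def", "bar", "return"] * 4)
    return ma.jaccard(mb)  # close to 0.5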
def get_tokens ( code: str )-> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    '''simple docstring'''
    def __init__( self , * , duplication_jaccard_threshold: float = 0.85 ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add ( self , code_key , min_hash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters ( self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save ( self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash ( element ):
    """simple docstring"""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter ( dataset_iterator: Type[Dataset] ):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters ( dataset_iterator: Type[Dataset] , jaccard_threshold: float ):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity ( code_a: str , code_b: str )-> float:
    """simple docstring"""
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
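# Worked example (added): for "a b c" vs "a b d" the token sets share {a, b} out of
# the union {a, b, c, d}, so jaccard_similarity("a b c", "a b d") == 2 / 4 == 0.5.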
_shared_dataset = None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ):
    """simple docstring"""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset ( dataset: Type[Dataset] , jaccard_threshold: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'''Original dataset size: {len(dataset )}''' )
    print(F'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(F'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(F'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(F'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
| 321 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args ( unknown_args ):
    """Deserialize `--key value` pairs from the unknown CLI arguments into a dict."""
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2], unknown_args[1::2] )}
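# Worked example (added): ["--name", "squad", "--all_configs", "true"] pairs up as
# (flag, value) and strips the leading dashes -> {"name": "squad", "all_configs": "true"}.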
def main ( ):
    """simple docstring"""
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''', usage='''datasets-cli <command> [<args>]''', allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
| 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
__magic_name__ :int = '''unwanted, running'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def A ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :List[str] = '''UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer()
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
__magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :Dict = '''UNwant\u00E9d,running'''
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__magic_name__ :Union[str, Any] = {}
for i, token in enumerate(__lowerCAmelCase ):
__magic_name__ :Tuple = i
__magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.get_tokenizer()
__magic_name__ :Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def A ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
__magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False
__magic_name__ :Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = ['''的''', '''人''', '''有''']
__magic_name__ :Any = ''''''.join(__lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = True
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[str] = False
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__magic_name__ :Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
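# A minimal sketch (not from the original test file) of the greedy
# longest-match-first lookup that WordpieceTokenizer applies per word, which is
# what the "unwanted" -> ["un", "##want", "##ed"] assertions above exercise.
# `greedy_wordpiece` is a hypothetical helper written only for illustration.
def greedy_wordpiece(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:  # shrink the candidate until it is in the vocab
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the "##" prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:  # no sub-piece matched: the whole word maps to UNK
            return [unk_token]
        tokens.append(cur)
        start = end
    return tokens


assert greedy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert greedy_wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]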
| 0 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowerCAmelCase : Any = 1_6
_lowerCAmelCase : str = 3_2
def bamb( snake_case__ ) -> int:
"""Convert a byte count to whole mebibytes (MiB)."""
return int(snake_case__ / 2**20 )
class __snake_case :
def __enter__( self ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCAmelCase__ = torch.cuda.memory_allocated()
return self
def __exit__( self ,*a_ ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
lowerCAmelCase__ = torch.cuda.memory_allocated()
lowerCAmelCase__ = torch.cuda.max_memory_allocated()
lowerCAmelCase__ = bamb(self.end - self.begin )
lowerCAmelCase__ = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders( snake_case__ , snake_case__ = 16 , snake_case__ = "bert-base-cased" , snake_case__ = 320 , snake_case__ = 160 , ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase__ = load_dataset(
'glue' , 'mrpc' , split={'train': f'train[:{n_train}]', 'validation': f'validation[:{n_val}]'} )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(
tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
lowerCAmelCase__ = DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
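# A toy, tokenizer-free illustration of the two padding strategies chosen in
# collate_fn above: fixed-length padding keeps tensor shapes static (what XLA/TPU
# wants), while "longest" wastes less compute everywhere else. Names are illustrative.
def _demo_padding(batch=((1, 2, 3), (4, 5)), pad_id=0, max_length=8):
    longest = max(len(seq) for seq in batch)
    pad_longest = [list(seq) + [pad_id] * (longest - len(seq)) for seq in batch]
    pad_fixed = [list(seq) + [pad_id] * (max_length - len(seq)) for seq in batch]
    return pad_longest, pad_fixed  # e.g. [[1, 2, 3], [4, 5, 0]] vs two length-8 rows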
def training_function( snake_case__ , snake_case__ ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config['lr']
lowerCAmelCase__ = int(config['num_epochs'] )
lowerCAmelCase__ = int(config['seed'] )
lowerCAmelCase__ = int(config['batch_size'] )
lowerCAmelCase__ = args.model_name_or_path
set_seed(snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = get_dataloaders(snake_case__ , snake_case__ , snake_case__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
lowerCAmelCase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase__ = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowerCAmelCase__ = 1
lowerCAmelCase__ = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
lowerCAmelCase__ = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase__ = 0
# Now we train the model
lowerCAmelCase__ = {}
for epoch in range(snake_case__ , snake_case__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(snake_case__ ):
lowerCAmelCase__ = model(**snake_case__ )
lowerCAmelCase__ = outputs.loss
lowerCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCAmelCase__ = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(snake_case__ , snake_case__ )
def main( ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , )
parser.add_argument(
'--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=snake_case__ , default=snake_case__ , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=snake_case__ , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=snake_case__ , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=snake_case__ , default=1 , help='Number of train epochs.' , )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
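# A sketch of the manual gradient-accumulation pattern from the training loop
# above, isolated with a toy model. The `step % steps == 0` test mirrors the
# source and therefore also fires on step 0; all names here are illustrative.
def _demo_grad_accumulation(steps=4):
    model = torch.nn.Linear(8, 1)
    optimizer = AdamW(model.parameters(), lr=1e-3)
    for step in range(2 * steps):
        loss = model(torch.randn(2, 8)).pow(2).mean()
        (loss / steps).backward()  # scale so the accumulated gradient is an average
        if step % steps == 0:
            optimizer.step()
            optimizer.zero_grad()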
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {}
class LlamaConfig( PretrainedConfig ):
model_type = 'llama'
keys_to_ignore_at_inference = ['past_key_values']
def __init__( self ,a_=3_2000 ,a_=4096 ,a_=1_1008 ,a_=32 ,a_=32 ,a_=None ,a_="silu" ,a_=2048 ,a_=0.02 ,a_=1e-6 ,a_=True ,a_=0 ,a_=1 ,a_=2 ,a_=1 ,a_=False ,a_=None ,**a_ ,):
"""simple docstring"""
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = num_key_value_heads
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = rms_norm_eps
lowerCAmelCase__ = pretraining_tp
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a_ ,bos_token_id=a_ ,eos_token_id=a_ ,tie_word_embeddings=a_ ,**a_ ,)
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,a_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'got {self.rope_scaling}' )
lowerCAmelCase__ = self.rope_scaling.get('type' ,a_ )
lowerCAmelCase__ = self.rope_scaling.get('factor' ,a_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(a_ ,a_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
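# The validation above as a standalone, runnable function so it can be exercised
# without instantiating a full config. `validate_rope_scaling` is an illustrative
# name, and the 'dynamic'/2.0 values below are examples, not defaults.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(
            f'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}')
    rope_scaling_type = rope_scaling.get('type')
    rope_scaling_factor = rope_scaling.get('factor')
    if rope_scaling_type not in ['linear', 'dynamic']:
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
    if not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")


validate_rope_scaling({'type': 'dynamic', 'factor': 2.0})  # passes: doubles the usable context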
| 604 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput (BaseOutput ):
_UpperCamelCase = 42
_UpperCamelCase = 42
class FlaxControlNetConditioningEmbedding (nn.Module ):
_UpperCamelCase = 42
_UpperCamelCase = (16, 32, 96, 256)
_UpperCamelCase = jnp.floataa
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : int = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCAmelCase : List[str] = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowerCAmelCase : List[Any] = self.block_out_channels[i]
__lowerCAmelCase : Dict = self.block_out_channels[i + 1]
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A_ )
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A_ )
__lowerCAmelCase : List[str] = blocks
__lowerCAmelCase : str = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Any = self.conv_in(A_ )
__lowerCAmelCase : int = nn.silu(A_ )
for block in self.blocks:
__lowerCAmelCase : int = block(A_ )
__lowerCAmelCase : Union[str, Any] = nn.silu(A_ )
__lowerCAmelCase : List[Any] = self.conv_out(A_ )
return embedding
@flax_register_to_config
class FlaxControlNetModel (nn.Module , FlaxModelMixin , ConfigMixin ):
_UpperCamelCase = 32
_UpperCamelCase = 4
_UpperCamelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase = False
_UpperCamelCase = (320, 640, 1280, 1280)
_UpperCamelCase = 2
_UpperCamelCase = 8
_UpperCamelCase = None
_UpperCamelCase = 1280
_UpperCamelCase = 0.0
_UpperCamelCase = False
_UpperCamelCase = jnp.floataa
_UpperCamelCase = True
_UpperCamelCase = 0
_UpperCamelCase = "rgb"
_UpperCamelCase = (16, 32, 96, 256)
def UpperCamelCase__ ( self , A_ ) ->FrozenDict:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
__lowerCAmelCase : str = jnp.zeros(A_ , dtype=jnp.floataa )
__lowerCAmelCase : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
__lowerCAmelCase : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__lowerCAmelCase : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowerCAmelCase : Optional[Any] = jnp.zeros(A_ , dtype=jnp.floataa )
__lowerCAmelCase, __lowerCAmelCase : Any = jax.random.split(A_ )
__lowerCAmelCase : int = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A_ , A_ , A_ , A_ , A_ )["params"]
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.block_out_channels
__lowerCAmelCase : Optional[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowerCAmelCase : Optional[int] = self.num_attention_heads or self.attention_head_dim
# input
__lowerCAmelCase : List[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__lowerCAmelCase : List[str] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__lowerCAmelCase : str = FlaxTimestepEmbedding(A_ , dtype=self.dtype )
__lowerCAmelCase : Optional[int] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__lowerCAmelCase : Union[str, Any] = self.only_cross_attention
if isinstance(A_ , A_ ):
__lowerCAmelCase : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A_ , A_ ):
__lowerCAmelCase : str = (num_attention_heads,) * len(self.down_block_types )
# down
__lowerCAmelCase : Optional[int] = []
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Optional[Any] = block_out_channels[0]
__lowerCAmelCase : List[str] = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowerCAmelCase : List[Any] = output_channel
__lowerCAmelCase : Optional[int] = block_out_channels[i]
__lowerCAmelCase : str = i == len(A_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowerCAmelCase : List[str] = FlaxCrossAttnDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__lowerCAmelCase : List[Any] = FlaxDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A_ )
for _ in range(self.layers_per_block ):
__lowerCAmelCase : str = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
if not is_final_block:
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
__lowerCAmelCase : int = down_blocks
__lowerCAmelCase : Optional[int] = controlnet_down_blocks
# mid
__lowerCAmelCase : List[str] = block_out_channels[-1]
__lowerCAmelCase : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=A_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , A_ , A_ , A_ , A_ , A_ = 1.0 , A_ = True , A_ = False , ) ->Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
__lowerCAmelCase : int = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowerCAmelCase : List[Any] = jnp.flip(A_ , axis=1 )
# 1. time
if not isinstance(A_ , jnp.ndarray ):
__lowerCAmelCase : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowerCAmelCase : Optional[int] = timesteps.astype(dtype=jnp.floataa )
__lowerCAmelCase : Optional[Any] = jnp.expand_dims(A_ , 0 )
__lowerCAmelCase : Optional[int] = self.time_proj(A_ )
__lowerCAmelCase : str = self.time_embedding(A_ )
# 2. pre-process
__lowerCAmelCase : List[str] = jnp.transpose(A_ , (0, 2, 3, 1) )
__lowerCAmelCase : Union[str, Any] = self.conv_in(A_ )
__lowerCAmelCase : Tuple = jnp.transpose(A_ , (0, 2, 3, 1) )
__lowerCAmelCase : int = self.controlnet_cond_embedding(A_ )
sample += controlnet_cond
# 3. down
__lowerCAmelCase : Tuple = (sample,)
for down_block in self.down_blocks:
if isinstance(A_ , A_ ):
__lowerCAmelCase, __lowerCAmelCase : Dict = down_block(A_ , A_ , A_ , deterministic=not train )
else:
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = down_block(A_ , A_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowerCAmelCase : Union[str, Any] = self.mid_block(A_ , A_ , A_ , deterministic=not train )
# 5. controlnet blocks
__lowerCAmelCase : Union[str, Any] = ()
for down_block_res_sample, controlnet_block in zip(A_ , self.controlnet_down_blocks ):
__lowerCAmelCase : List[Any] = controlnet_block(A_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowerCAmelCase : Dict = controlnet_down_block_res_samples
__lowerCAmelCase : int = self.controlnet_mid_block(A_ )
# 6. scaling
__lowerCAmelCase : Tuple = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=A_ , mid_block_res_sample=A_ )
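# A minimal standalone demo (reusing the jax/flax imports at the top of this file)
# of the zero-initialized convolution trick used by the controlnet blocks above
# (kernel_init/bias_init = zeros): a zero conv makes the control branch a no-op at
# initialization, so the frozen base UNet is untouched until the ControlNet has
# actually learned something. `ZeroConv` is an illustrative name.
class ZeroConv(nn.Module):
    features: int = 4

    @nn.compact
    def __call__(self, x):
        return nn.Conv(self.features, kernel_size=(1, 1),
                       kernel_init=nn.initializers.zeros_init(),
                       bias_init=nn.initializers.zeros_init())(x)


_x = jnp.ones((1, 8, 8, 4))
_params = ZeroConv().init(jax.random.PRNGKey(0), _x)
assert jnp.all(ZeroConv().apply(_params, _x) == 0.0)  # contributes nothing at step 0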
| 492 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def create_rename_keys( lowercase__ , lowercase__=False , lowercase__=False , lowercase__=False ):
__lowerCAmelCase : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v( lowercase__ , lowercase__ ):
for i in range(config.num_hidden_layers ):
__lowerCAmelCase : Union[str, Any] = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase : str = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase : Any = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase : Dict = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase : List[str] = in_proj_bias[: config.hidden_size]
__lowerCAmelCase : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase : int = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase : Optional[int] = in_proj_bias[-config.hidden_size :]
def _lowercase ( lowercase__ ):
__lowerCAmelCase : List[Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def rename_key( lowercase__ , lowercase__ , lowercase__ ):
__lowerCAmelCase : Optional[Any] = dct.pop(lowercase__ )
__lowerCAmelCase : Union[str, Any] = val
@torch.no_grad()
def convert_vilt_checkpoint( lowercase__ , lowercase__ ):
__lowerCAmelCase : Tuple = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=lowercase__ )
__lowerCAmelCase : int = False
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : Any = False
__lowerCAmelCase : Optional[Any] = False
if "vqa" in checkpoint_url:
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[int] = 3_1_2_9
__lowerCAmelCase : Optional[int] = '''huggingface/label-files'''
__lowerCAmelCase : List[str] = '''vqa2-id2label.json'''
__lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) )
__lowerCAmelCase : List[str] = {int(lowercase__ ): v for k, v in idalabel.items()}
__lowerCAmelCase : Optional[int] = idalabel
__lowerCAmelCase : int = {v: k for k, v in idalabel.items()}
__lowerCAmelCase : Tuple = ViltForQuestionAnswering(lowercase__ )
elif "nlvr" in checkpoint_url:
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : Optional[Any] = 2
__lowerCAmelCase : Tuple = {0: '''False''', 1: '''True'''}
__lowerCAmelCase : List[str] = {v: k for k, v in config.idalabel.items()}
__lowerCAmelCase : int = 3
__lowerCAmelCase : Optional[Any] = ViltForImagesAndTextClassification(lowercase__ )
elif "irtr" in checkpoint_url:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Union[str, Any] = ViltForImageAndTextRetrieval(lowercase__ )
elif "mlm_itm" in checkpoint_url:
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Tuple = ViltForMaskedLM(lowercase__ )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase__ , map_location='''cpu''' )['''state_dict''']
__lowerCAmelCase : Optional[Any] = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ )
if mlm_model or irtr_model:
__lowerCAmelCase : List[Any] = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
__lowerCAmelCase, __lowerCAmelCase : Optional[int] = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase__ )
# Define processor
__lowerCAmelCase : Optional[int] = ViltImageProcessor(size=3_8_4 )
__lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__lowerCAmelCase : List[str] = ViltProcessor(lowercase__ , lowercase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
__lowerCAmelCase : Optional[Any] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=lowercase__ ).raw )
__lowerCAmelCase : List[Any] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=lowercase__ ).raw )
__lowerCAmelCase : Dict = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
__lowerCAmelCase : int = processor(lowercase__ , lowercase__ , return_tensors='''pt''' )
__lowerCAmelCase : Any = processor(lowercase__ , lowercase__ , return_tensors='''pt''' )
__lowerCAmelCase : List[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
__lowerCAmelCase : List[str] = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=lowercase__ ).raw )
if mlm_model:
__lowerCAmelCase : List[str] = '''a bunch of [MASK] laying on a [MASK].'''
else:
__lowerCAmelCase : Tuple = '''How many cats are there?'''
__lowerCAmelCase : str = processor(lowercase__ , lowercase__ , return_tensors='''pt''' )
__lowerCAmelCase : Tuple = model(**lowercase__ )
# Verify outputs
if mlm_model:
__lowerCAmelCase : int = torch.Size([1, 1_1, 3_0_5_2_2] )
__lowerCAmelCase : Dict = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 )
# verify masked token prediction equals "cats"
__lowerCAmelCase : List[str] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
__lowerCAmelCase : Optional[int] = torch.Size([1, 3_1_2_9] )
__lowerCAmelCase : Dict = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
__lowerCAmelCase : Optional[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
__lowerCAmelCase : Optional[Any] = torch.Size([1, 2] )
__lowerCAmelCase : Any = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_UpperCamelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
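# A toy illustration of the fused-QKV split performed by read_in_q_k_v above:
# timm checkpoints store attention as one (3*hidden, hidden) matrix, and the HF
# model wants separate query/key/value blocks taken row-wise in that order.
# `_demo_qkv_split` is a hypothetical helper written only for illustration.
def _demo_qkv_split(hidden=4):
    in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = in_proj_weight[:hidden, :]
    k = in_proj_weight[hidden : hidden * 2, :]
    v = in_proj_weight[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), in_proj_weight)
    return q, k, v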
| 492 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 1_00 * 2**20, 9_00 * 2**20] )
def lowerCAmelCase_ (lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : List[Any] ) -> str:
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , lowercase__ )
lowerCAmelCase__ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowerCAmelCase__ = dataset_size < in_memory_max_size
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = is_small_dataset(lowercase__ )
assert result == expected
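# The decision rule the parametrized test above reduces to, written out as a
# standalone check (sizes in bytes; `_expected_small` is an illustrative name):
def _expected_small(dataset_size, in_memory_max_size):
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)


assert _expected_small(400 * 2**20, 900 * 2**20) is True  # 400 MiB fits under a 900 MiB cap
assert _expected_small(600 * 2**20, 0) is False           # a cap of 0 disables in-memory loading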
| 718 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=13 , SCREAMING_SNAKE_CASE_ : Dict=7 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : List[str]=99 , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : Tuple=37 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=50 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = scope
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def __snake_case ( self : List[str] ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def __snake_case ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ = True
lowerCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
lowerCAmelCase__ = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : str , ):
lowerCAmelCase__ = True
lowerCAmelCase__ = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
# first forward pass
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )['''hidden_states'''][0]
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , ):
lowerCAmelCase__ = BertGenerationDecoder(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ :Dict = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
UpperCamelCase_ :str = (BertGenerationDecoder,) if is_torch_available() else ()
UpperCamelCase_ :List[str] = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = BertGenerationEncoderTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def __snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = '''bert'''
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Any ):
# This regression test was failing with PyTorch < 1.3
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase__ = None
self.model_tester.create_and_check_model_as_decoder(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case ( self : str ):
lowerCAmelCase__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
lowerCAmelCase__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase__ = torch.Size([1, 8, 1_024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __snake_case ( self : Any ):
lowerCAmelCase__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
lowerCAmelCase__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase__ = torch.Size([1, 8, 50_358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
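# A self-contained sketch of the property create_and_check_decoder_model_past_large_inputs
# verifies above: decoding new tokens against a key/value cache must match rerunning
# the full sequence. Shown on a bare single-head causal attention layer so it needs
# nothing beyond torch; sizes and names are illustrative.
def _demo_kv_cache(dim=8, past_len=5, new_len=3):
    torch.manual_seed(0)
    wq, wk, wv = (torch.randn(dim, dim) for _ in range(3))
    x = torch.randn(1, past_len + new_len, dim)

    def attend(q, k, v):
        scores = q @ k.transpose(-1, -2) / dim**0.5
        causal = torch.ones(q.shape[1], k.shape[1]).tril(k.shape[1] - q.shape[1]).bool()
        return scores.masked_fill(~causal, float('-inf')).softmax(-1) @ v

    full = attend(x @ wq, x @ wk, x @ wv)  # no cache: attend over the whole sequence
    past_k, past_v = x[:, :past_len] @ wk, x[:, :past_len] @ wv  # the "past_key_values"
    new = x[:, past_len:]
    cached = attend(new @ wq, torch.cat([past_k, new @ wk], 1), torch.cat([past_v, new @ wv], 1))
    assert torch.allclose(full[:, past_len:], cached, atol=1e-5)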
| 288 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the argument parser for the `accelerate test` command."""
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Run the bundled end-to-end test script through `accelerate-launch`."""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
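# A usage sketch of how the `test` subcommand above hangs off a parent CLI;
# `my_cli` is an illustrative program name, not part of Accelerate.
def _demo_register_subcommand():
    root = argparse.ArgumentParser('my_cli')
    subcommands = root.add_subparsers()
    test_command_parser(subparsers=subcommands)
    args = root.parse_args(['test', '--config_file', 'default_config.yaml'])
    assert args.func is test_command and args.config_file == 'default_config.yaml'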
| 154 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3) by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:  # advance to the next divisor of the remaining n
            i += 1
        ans = i
        while n % i == 0:  # strip this prime factor out completely
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 154 | 1 |
def molarity_to_normality(nfactor, moles, volume):
    """Normality from molarity: N = (moles / volume) * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume, moles, temperature):
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure, moles, temperature):
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure, moles, volume):
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
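# A usage example for the conversions above (2 mol of an ideal gas in a 10 L
# vessel at 300 K; the values are illustrative):
def _demo_ideal_gas():
    assert moles_to_pressure(volume=10, moles=2, temperature=300) == 5    # P = nRT/V ~= 4.93 atm
    assert moles_to_volume(pressure=5, moles=2, temperature=300) == 10    # V = nRT/P ~= 9.85 L
    assert pressure_and_volume_to_temperature(pressure=5, moles=2, volume=10) == 305  # T = PV/(nR) ~= 304.5 K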
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
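# The idea behind _LazyModule, reduced to a PEP 562 module-level __getattr__:
# the heavy submodule is imported only when its attribute is first touched.
# This is a simplified illustration, not transformers' actual implementation.
import importlib

_LAZY = {"EncoderDecoderModel": ".modeling_encoder_decoder"}


def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")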
| 152 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 16 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
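# Illustrative: decimal_to_binary(3, [1.5]) -> ["0.00.01.5"]; float minterms are
# stringified as-is, which is why main() below reads its inputs as floats.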
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # pick columns covered by exactly one prime implicant (essential implicants)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 314 | 0 |
'''simple docstring'''
def solution():
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
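# For reference, this product of digits of the Champernowne constant is 210.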
if __name__ == "__main__":
print(solution())
| 712 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,  # reconstructed value; the garbled source lost this literal
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
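    # Example invocation (script and file names below are illustrative
    # placeholders, not taken from the original source):
    #     python conversion_ldm_uncond.py --checkpoint_path model.ckpt \
    #         --config_path config.yaml --output_path ./ldm-pipeline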
| 513 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_backbone(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("""TimmBackbone doesn't support feed forward chunking""")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("""TimmBackbone initialization is managed on the timm side""")
    def test_initialization(self):
        pass

    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
    def test_save_load(self):
        pass

    @unittest.skip("""model weights aren't tied in TimmBackbone.""")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("""model weights aren't tied in TimmBackbone.""")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
    def test_model_weights_reload_when_same_tied_weights(self):
        pass

    @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""")
    def test_channels(self):
        pass

    @unittest.skip("""TimmBackbone doesn't support output_attentions.""")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("""Safetensors is not supported by timm.""")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 407 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A ='<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''')
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(""","""))
                    out_line = """from . import """ + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'''Error converting {out_line.strip()}''')

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dataset_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f'''Adding directory {output_dir}''')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f'''Converted in {output_file}''')

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''')
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''')

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
| 407 | 1 |
def binomial_coefficient(n, k):
    result = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count):
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n):
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count):
    return catalan_number(node_count) * factorial(node_count)
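# Quick sanity check for the helpers above (standard values):
#     catalan_number(5)    -> 42
#     factorial(5)         -> 120
#     binary_tree_count(5) -> 5040  (42 * 120 labeled binary trees on 5 nodes)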
if __name__ == "__main__":
lowerCAmelCase : List[Any] =int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 719 | import math
def is_prime(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
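# Illustrative behaviour of the two helpers above:
#     is_prime(97)   -> True
#     is_prime(1)    -> False
#     next_prime(14) -> 17   (counts upward; pass desc=True to search downward)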
| 15 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = '''bert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
| 4 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
space_files = [file for file in filepaths if """ """ in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
hyphen_files = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 713 |
def solution(n=4000000) -> int:
    """Returns the sum of the even-valued Fibonacci terms not exceeding n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
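# Illustrative: solution(10) -> 10, since the even Fibonacci terms <= 10 are 2 and 8.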
if __name__ == "__main__":
print(f'''{solution() = }''')
| 372 | 0 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
a_ : List[Any] = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = 'masked_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 676 |
import argparse
import json
import subprocess
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : int = []
lowercase : int = (
f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
lowercase : Any = subprocess.run(_UpperCamelCase, shell=_UpperCamelCase, stdout=subprocess.PIPE )
lowercase : Optional[Any] = output.stdout.decode('''utf-8''' )
lowercase : Any = json.loads(_UpperCamelCase )
lowercase : Dict = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''', '''w''' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
lowercase : int = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(f"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
return values.split(''',''' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 319 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
SCREAMING_SNAKE_CASE : Tuple = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
lowercase_ :Any = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowercase_ :List[Any] = bs[:]
lowercase_ :Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_a )
cs.append(2**8 + n )
n += 1
lowercase_ :Union[str, Any] = [chr(_a ) for n in cs]
return dict(zip(_a , _a ) )
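# The mapping above is the GPT-2 byte-level trick: every byte value gets a
# printable unicode character, e.g. the space byte (32) maps to "Ġ" (U+0120).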
def UpperCamelCase ( _a ) -> int:
'''simple docstring'''
lowercase_ :Dict = set()
lowercase_ :Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ :Union[str, Any] = char
return pairs
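# Illustrative: get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}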
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )

        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8'''))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = ''' '''.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 441 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct the model config and an empty model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
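# Example invocation (the paths are illustrative, not real artifacts):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai_tf_checkpoint \
#       --pytorch_dump_folder_path ./openai_gpt_pytorch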
| 441 | 1 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer ):
    '''simple docstring'''
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution(max_proportion = 1 / 12_345 ):
    '''simple docstring'''
    perfect_partitions = 0
    total_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(integer )
        integer += 1
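# Illustrative sanity check (not from the original file): for n = 12 we get
# sqrt(4*12 + 1) = 7, so the inner expression is 4 and log2(4) = 2 is integral.
assert check_partition_perfect(12)
assert not check_partition_perfect(11)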
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    '''simple docstring'''
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    '''simple docstring'''
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )
def test_gen_gaussian_kernel():
    '''simple docstring'''
    resp = canny.gen_gaussian_kernel(9, sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    '''simple docstring'''
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    '''simple docstring'''
    assert gg.gaussian_filter(gray, 5, sigma=0.9 ).all()
def test_convolve_filter():
    '''simple docstring'''
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray, laplace ).astype(uint8 )
    assert res.any()
def test_median_filter():
    '''simple docstring'''
    assert med.median_filter(gray, 3 ).any()
def test_sobel_filter():
    '''simple docstring'''
    grad, theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()
def test_sepia():
    '''simple docstring'''
    sepia = sp.make_sepia(img, 20 )
    assert sepia.all()
def test_burkes(file_path = "digital_image_processing/image_data/lena_small.jpg" ):
    '''simple docstring'''
    burkes = bs.Burkes(imread(file_path, 1 ), 120 )
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path = "digital_image_processing/image_data/lena_small.jpg", ):
    '''simple docstring'''
    nn = rs.NearestNeighbour(imread(file_path, 1 ), 400, 200 )
    nn.process()
    assert nn.output.any()
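# The suite above can be run directly with pytest (illustrative invocation; it
# assumes the digital_image_processing package and its image fixtures on disk):
#   python -m pytest test_digital_image_processing.py -q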
def test_local_binary_pattern():
    '''simple docstring'''
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0] ):
        for j in range(0, image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j )
    assert lbp_image.any() | 667 | 1 |
def excel_title_to_column(column_title: str ):
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26, power )
        answer += value
        power += 1
        index -= 1
    return answer
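# Illustrative examples (not from the original file): the mapping is bijective
# base-26 with A = 1, so "AB" is 1 * 26 + 2 = 28.
assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28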
if __name__ == "__main__":
from doctest import testmod
testmod()
| 181 |
import math
import unittest
def is_prime(number: int ):
    """simple docstring"""
    assert isinstance(number, int ) and (
        number >= 0
    ), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
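# Why stepping by 6 works: every prime above 3 is congruent to 1 or 5 mod 6,
# since the other residues are divisible by 2 or 3. Quick illustration:
assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13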
class Test(unittest.TestCase ):
    def test_primes(self ):
        """simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
        self.assertTrue(is_prime(11 ) )
        self.assertTrue(is_prime(13 ) )
        self.assertTrue(is_prime(17 ) )
        self.assertTrue(is_prime(19 ) )
        self.assertTrue(is_prime(23 ) )
        self.assertTrue(is_prime(29 ) )
    def test_not_primes(self ):
        """simple docstring"""
        with self.assertRaises(AssertionError ):
            is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 181 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase ):
    def setUp(self ):
        """simple docstring"""
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, '''models/bert/''' ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/transformers/models/bert/modeling_bert.py''' ), os.path.join(self.transformer_dir, '''models/bert/modeling_bert.py''' ), )
    def tearDown(self ):
        """simple docstring"""
        check_copies.TRANSFORMER_PATH = '''src/transformers'''
        shutil.rmtree(self.transformer_dir )
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None ):
        """simple docstring"""
        code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119 )
        code = black.format_str(code, mode=black_mode )
        fname = os.path.join(self.transformer_dir, '''new_code.py''' )
        with open(fname, '''w''', newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True )
            with open(fname, '''r''' ) as f:
                self.assertTrue(f.read(), expected )
    def test_find_code_in_transformers(self ):
        """simple docstring"""
        code = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
        self.assertEqual(code, REFERENCE_CODE )
    def test_is_copy_consistent(self ):
        """simple docstring"""
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' ,'''BertLMPredictionHead''' ,REFERENCE_CODE + '''\n''' ,)
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' ,'''BertLMPredictionHead''' ,REFERENCE_CODE ,)
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' ,'''TestModelLMPredictionHead''' ,re.sub('''Bert''' ,'''TestModel''' ,REFERENCE_CODE ) ,)
        # Copy consistency with a really long name
        long_class_name = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' ,f'{long_class_name}LMPredictionHead' ,re.sub('''Bert''' ,long_class_name ,REFERENCE_CODE ) ,)
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' ,'''TestModelLMPredictionHead''' ,REFERENCE_CODE ,overwrite_result=re.sub('''Bert''' ,'''TestModel''' ,REFERENCE_CODE ) ,)
    def test_convert_to_localized_md(self ):
        """simple docstring"""
        localized_readme = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list ,localized_md_list ,localized_readme['''format_model_list'''] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list ,converted_md_list_sample )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list ,converted_md_list ,localized_readme['''format_model_list'''] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list ,link_unchanged_md_list ,localized_readme['''format_model_list'''] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list ,converted_md_list_sample )
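# For context, this is the shape of the marker the checker validates (sketch
# only; the class below is hypothetical):
#
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#   class TestModelLMPredictionHead(nn.Module):
#       ...
#
# The body must stay identical to the source after the Bert->TestModel rename,
# otherwise `python utils/check_copies.py --fix_and_overwrite` rewrites it.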
| 188 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder" ):
            key = key.replace("module.encoder", "glpn.encoder" )
        if key.startswith("module.decoder" ):
            key = key.replace("module.decoder", "decoder.stages" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx )-1}" )
        if "norm" in key:
            key = key.replace("norm", "layer_norm" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx )-1}" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f"block{idx}", f"block.{int(idx )-1}" )
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn", "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1", "dense1" )
        if "fc2" in key:
            key = key.replace("fc2", "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse" )
            key = key.replace("linear_fuse.bn", "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx )-1}" )
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution" )
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution" )
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution" )
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion" )
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion" )
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion" )
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer" )
        if key.startswith("module.last_layer_depth" ):
            key = key.replace("module.last_layer_depth", "head.head" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config ):
    '''simple docstring'''
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None ):
    '''simple docstring'''
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt" ).pixel_values
    logger.info("Converting model..." )
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(f"Unknown model name: {model_name}" )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4 )
        print("Looks ok!" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name ), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name ), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
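# Example invocation (the checkpoint path is illustrative):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti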
| 188 | 1 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """simple docstring"""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000 ) -> int:
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
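# Illustrative check (Project Euler 25): the first Fibonacci number with three
# digits is F(12) = 144, so the index returned for n = 3 is 12.
assert solution(3) == 12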
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 270 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester ):
    def create_and_test_config_common_properties(self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, 'tf_padding' ) )
        self.parent.assertTrue(hasattr(config, 'depth_multiplier' ) )
class MobileNetVaModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self ):
        '''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values, labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
    def test_model_common_attributes(self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='MobileNetV2 does not output attentions' )
    def test_attention_outputs(self ):
        '''simple docstring'''
        pass
    def test_forward_signature(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ), expected_num_stages )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
    def test_for_image_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
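# Note: the two @slow integration tests below download real checkpoints from
# the Hub. A typical local run of just the fast tests (illustrative command):
#   python -m pytest tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py -q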
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        '''simple docstring'''
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self ):
        '''simple docstring'''
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
    @slow
    def test_inference_semantic_segmentation(self ):
        '''simple docstring'''
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4 ) ) | 270 | 1 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_24,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_40,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 14_08,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_60,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 15_36,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_00,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 17_92,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_80,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 20_48,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_56,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 23_04,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_28,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 25_60,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_00,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names ):
    block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names, range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3, 2, 0, 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2, 3, 0, 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1_000, classifier_activation="softmax", )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params, tf_params, key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img(), return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x, axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f'''Pushing converted {model_name} to the hub...''' )
        model_name = f'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
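# Example invocation (the output directory is illustrative):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path ./efficientnet-b0 --save_model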
| 508 |
'''simple docstring'''
def factorial(num: int ):
    fact = 1
    for i in range(1, num + 1 ):
        fact *= i
    return fact
def split_and_add(number: int ):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100 ):
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
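# Illustrative check (Project Euler 20 in miniature): 10! = 3628800, and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27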
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 508 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
def text_justification(word: str, max_width: int ) -> list:
    words = word.split()
    def justify(line: list, width: int, max_width: int ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width ) )
            # reset new line and new width
            line, width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(' '.join(line ) + (remaining_spaces + 1) * ' ' )
    return answer
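# Illustrative example:
# >>> text_justification("This is an example of text justification.", 16)
# ['This    is    an', 'example  of text', 'justification.  ']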
if __name__ == "__main__":
from doctest import testmod
testmod()
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 591 |
'''simple docstring'''
import math
def insertion_sort(array, start = 0, end = 0 ) -> list:
    """simple docstring"""
    end = end or len(array )
    for i in range(start, end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array, index, heap_size ) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size )
def heap_sort(array ) -> list:
    """simple docstring"""
    n = len(array )
    for i in range(n // 2, -1, -1 ):
        heapify(array, i, n )
    for i in range(n - 1, 0, -1 ):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i )
    return array
def median_of_3(array, first_index, middle_index, last_index ) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array, low, high, pivot ) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array ) -> list:
    """simple docstring"""
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array, 0, len(array ), size_threshold, max_depth )
def intro_sort(array, start, end, size_threshold, max_depth ) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1 )
        p = partition(array, start, end, pivot )
        intro_sort(array, p, end, size_threshold, max_depth )
        end = p
    return insertion_sort(array, start, end )
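# Illustrative check: the hybrid should agree with sorted() regardless of which
# regime it takes (insertion sort below the size threshold, quicksort with
# median-of-3 pivots above it, heap sort once the depth budget is exhausted).
data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(list(data)) == sorted(data)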
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 591 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
    def test_small_model_pt(self ):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "LABEL_0", "score": 0.504}] )
        outputs = text_classifier("This is great !", top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
        outputs = text_classifier(["This is great !", "This is bad"], top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ), [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ], )
        outputs = text_classifier("This is great !", top_k=1 )
        self.assertEqual(nested_simplify(outputs ), [{"label": "LABEL_0", "score": 0.504}] )
        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False )
        self.assertEqual(nested_simplify(outputs ), [{"label": "LABEL_0", "score": 0.504}] )
        outputs = text_classifier("This is great !", return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ), [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ], )
        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False )
        self.assertEqual(
            nested_simplify(outputs ), [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ], )
@require_torch
    def test_accepts_torch_device(self ):
        import torch
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch.device("cpu" ), )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
    def test_small_model_tf(self ):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
    def test_pt_bert(self ):
        text_classifier = pipeline("text-classification" )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "POSITIVE", "score": 1.0}] )
        outputs = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "NEGATIVE", "score": 1.0}] )
        outputs = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
    def test_tf_bert(self ):
        text_classifier = pipeline("text-classification", framework="tf" )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "POSITIVE", "score": 1.0}] )
        outputs = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "NEGATIVE", "score": 1.0}] )
        outputs = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(outputs ), [{"label": "POSITIVE", "score": 0.988}] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test( self , text_classifier , _ ):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = """HuggingFace is in"""
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}, {"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"""label""": ANY(str ), """score""": ANY(float )}] * N, [{"""label""": ANY(str ), """score""": ANY(float )}] * N] , )
        valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , {"""label""": ANY(str ), """score""": ANY(float )} , )
        self.assertTrue(outputs["""label"""] in model.config.id2label.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_input )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
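
# Illustrative sketch (not part of the test file above): the `top_k` behavior these
# tests exercise can be reproduced directly; `top_k=None` returns scores for every
# label and replaces the legacy `return_all_scores=True` flag.
#
#   from transformers import pipeline
#   clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#   print(clf("This is great !"))              # best label only
#   print(clf("This is great !", top_k=None))  # all labels with scores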
| 478 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
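
# Illustrative sketch (not part of the original script): the staleness predicate used
# above, factored out so the date logic could be unit-tested without the GitHub API.
#
#   def is_closable(last_comment, issue, now):
#       return (
#           last_comment is not None
#           and last_comment.user.login == "github-actions[bot]"
#           and (now - issue.updated_at).days > 7
#           and (now - issue.created_at).days >= 30
#       )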
| 478 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values, '''labels''': labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
    @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
        '''simple docstring'''
        pass
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> List[Any]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
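
# Illustrative sketch (not part of the test file): the same checkpoint can be used for
# plain inference; `id2label` maps the argmax logit to a human-readable class.
#
#   from transformers import AutoImageProcessor, ConvNextV2ForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[int(logits.argmax(-1))])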
| 92 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    def check_output( self , want , got , optionflags ):
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
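
# Illustrative sketch (not part of the original conftest): a doctest exercising the
# custom IGNORE_RESULT flag registered above - the printed output is not compared.
#
#   def answer():
#       """
#       >>> answer()  # doctest: +IGNORE_RESULT
#       'whatever this prints is accepted'
#       """
#       return 42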
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_m2m_100'] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr , size , stride ) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr , size , stride ) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
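
# Illustrative alternative (not part of the original module): with NumPy >= 1.20 the
# explicit loops above can be replaced by sliding_window_view; `size` and `stride`
# have the same meaning as in maxpooling/avgpooling and produce the same output shape.
def maxpooling_vectorized(arr, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    # all size x size windows, shape (H - size + 1, W - size + 1, size, size)
    windows = np.lib.stride_tricks.sliding_window_view(arr, (size, size))
    # keep every `stride`-th window, then reduce each window to its maximum
    return windows[::stride, ::stride].max(axis=(2, 3))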
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"""\s+""")
def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line ) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example):
    """Calculates the fraction of alphanumeric characters in the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"] )
        return True
    else:
        return False
def is_autogenerated(example , scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05):
    """Check if file is a configuration or test file."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("config" )
        count_test += line.lower().count("test" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute character/token ratio of the file with tokenizer."""
    input_ids = tokenizer(example["content"] , truncation=False )["input_ids"]
    ratio = len(example["content"] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example , uniques , args):
    """Filter dataset with heuristics."""
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path , "rb" ) as f_in:
        with gzip.open(str(file_path ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
uniques = set(ds.unique("""hash"""))
frac = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / """duplicate_clusters.json""", """w""") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 435 | '''simple docstring'''
from sklearn.metrics import f1_score
import datasets
lowerCAmelCase_ : int = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
lowerCAmelCase_ : Optional[int] = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
lowerCAmelCase_ : Any = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) ->Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ) ->int:
        '''simple docstring'''
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 435 | 1 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str ):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , '''git_log.json''' ) , '''w''' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params(params ):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('''Initializing GPUs''' )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['''WORLD_SIZE'''] )
        params.n_gpu_per_node = int(os.environ['''N_GPU_NODE'''] )
        params.global_rank = int(os.environ['''RANK'''] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['''N_NODES'''] )
        assert params.node_id == int(os.environ['''NODE_RANK'''] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed(args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
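
# Illustrative note (not part of the original utils): init_gpu_params expects the
# environment variables that torch.distributed launchers export per process, e.g.
#
#   WORLD_SIZE=8 N_GPU_NODE=4 N_NODES=2 NODE_RANK=0 RANK=0 python train.py --local_rank 0
#
# so a multi-GPU run is typically started with a launcher such as
# `python -m torch.distributed.launch --nproc_per_node=4 train.py ...`,
# which sets WORLD_SIZE/RANK automatically for every worker.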
| 711 |
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int] , max_sum: int ) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree(nums: list[int] , max_sum: int , num_index: int , path: list[int] , result: list[list[int]] , remaining_nums_sum: int , ) -> None:
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
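
# Illustrative cross-check (not part of the original module): for non-negative inputs
# the backtracking search above returns exactly the subsets a brute-force scan finds.
from itertools import combinations


def subsets_with_sum_bruteforce(values: list[int], target: int) -> list[list[int]]:
    # enumerate every combination of every length and keep the ones hitting the target
    return [list(c) for r in range(len(values) + 1) for c in combinations(values, r) if sum(c) == target]


assert sorted(map(sorted, result)) == sorted(map(sorted, subsets_with_sum_bruteforce(nums, max_sum)))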
| 555 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester :
def __init__( self , _SCREAMING_SNAKE_CASE , ):
_UpperCAmelCase = parent
_UpperCAmelCase = 13
_UpperCAmelCase = 7
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = 2
_UpperCAmelCase = 99
_UpperCAmelCase = 0
_UpperCAmelCase = 32
_UpperCAmelCase = 2
_UpperCAmelCase = 4
_UpperCAmelCase = 0.1
_UpperCAmelCase = 0.1
_UpperCAmelCase = 512
_UpperCAmelCase = 16
_UpperCAmelCase = 2
_UpperCAmelCase = 0.02
_UpperCAmelCase = 3
_UpperCAmelCase = 4
_UpperCAmelCase = """last"""
_UpperCAmelCase = True
_UpperCAmelCase = None
_UpperCAmelCase = 0
    def prepare_config_and_inputs( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
_UpperCAmelCase = None
if self.use_input_lengths:
_UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_UpperCAmelCase = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_UpperCAmelCase = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_UpperCAmelCase = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFFlaubertModel,
            """fill-mask""": TFFlaubertWithLMHeadModel,
            """question-answering""": TFFlaubertForQuestionAnsweringSimple,
            """text-classification""": TFFlaubertForSequenceClassification,
            """token-classification""": TFFlaubertForTokenClassification,
            """zero-shot""": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp( self ):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFFlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self ):
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) ) | 518 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out , labels ) -> Dict:
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path ) -> Union[str, Any]:
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path , encoding="""utf_8""" ) as f:
        f = csv.reader(f )
        output = []
        next(f )  # skip the first line
        for line in tqdm(f ):
            output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ) -> Optional[int]:
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
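
# Illustrative note (not part of the original script): the (n_batch, 2, input_len)
# layout built above stores both continuations of one story side by side:
#   input_ids[i, 0, :len(with_cont1)] = [start] story [delim] continuation_1 [clf]
#   input_ids[i, 1, :len(with_cont2)] = [start] story [delim] continuation_2 [clf]
# mc_token_ids[i, k] records the position of the [clf] token that the multiple-choice
# head reads, and mc_labels[i] says which of the two rows is the true continuation.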
def main() -> Union[str, Any]:
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=snake_case , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=snake_case , type=snake_case , required=snake_case , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=snake_case , default="""""" )
parser.add_argument("""--eval_dataset""" , type=snake_case , default="""""" )
parser.add_argument("""--seed""" , type=snake_case , default=4_2 )
parser.add_argument("""--num_train_epochs""" , type=snake_case , default=3 )
parser.add_argument("""--train_batch_size""" , type=snake_case , default=8 )
parser.add_argument("""--eval_batch_size""" , type=snake_case , default=1_6 )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=snake_case , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=snake_case , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=snake_case , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=snake_case , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=snake_case , default=6.25E-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=snake_case , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=snake_case , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=snake_case , default=0.01 )
parser.add_argument("""--lm_coef""" , type=snake_case , default=0.9 )
parser.add_argument("""--n_valid""" , type=snake_case , default=3_7_4 )
parser.add_argument("""--server_ip""" , type=snake_case , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=snake_case , default="""""" , help="""Can be used for distant debugging.""" )
_UpperCAmelCase = parser.parse_args()
print(snake_case )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_UpperCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
_UpperCAmelCase = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(snake_case , snake_case ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_UpperCAmelCase = ["""_start_""", """_delimiter_""", """_classify_"""]
_UpperCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(snake_case )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case )
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(snake_case ) )
model.to(snake_case )
    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object (a string, an int, or a list of either)."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
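
    # Training objective of the double-heads model: the forward pass below
    # returns (lm_loss, mc_loss, ...), combined as
    #   loss = args.lm_coef * lm_loss + mc_loss
    # so the language-modeling term acts as a regularizer for the small
    # multiple-choice fine-tuning set.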
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`.
        # WEIGHTS_NAME and CONFIG_NAME are assumed to be the standard transformers
        # filename constants imported at the top of the script.
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
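
# Example invocation (file paths are illustrative placeholders):
#   python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#       --train_dataset data/cloze_train.csv --eval_dataset data/cloze_val.csv \
#       --output_dir out/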
if __name__ == "__main__":
main() | 518 | 1 |
def merge_sort(collection):
    """Sort a list in ascending order with a recursive, generator-based merge."""

    def merge(left, right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
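

# Quick usage check: merge_sort([3, 1, 2]) -> [1, 2, 3].
# The list is halved log2(n) times and each merge pass is linear, so the sort
# runs in O(n log n); the generator keeps the merge logic compact but still
# materializes O(n) intermediate lists at each level of the recursion.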
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
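
    # `loss` is the mean per-token cross-entropy, so multiplying by the number
    # of label tokens recovers the total sequence log-likelihood that the
    # original Mesh-TensorFlow ("mtf") implementation reports.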
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
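

# Minimal usage sketch (checkpoint name and image path are illustrative):
#
#   from PIL import Image
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=Image.open("figure.png"), text="A caption", return_tensors="pt")
#   # -> flattened image patches in `inputs` (up to max_patches) plus
#   #    decoder_input_ids / decoder_attention_mask for the text.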
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is"
        ' that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen'
        " the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
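
    # Note on the language-code mechanics exercised above: MBart encodes the
    # source as `tokens + [eos, src_lang_code]`, and after shift_tokens_right
    # the decoder input starts with the target language code — hence the checks
    # for [2, EN_CODE] at the end of input_ids and RO_CODE at the start of
    # decoder_input_ids.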
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
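
# Example invocation (script and file names are illustrative). Each line of
# the "correct" file is "path;ClassName;test_name;corrected_line", and test
# failures are listed as "path::ClassName::test_name":
#   python overwrite_expected_outputs.py --correct_filename corrected.txt --fail_filename failures.txt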
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys from the original (loaded) state dict
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for the base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
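
# Example invocation (checkpoint and config paths are illustrative):
#   python convert_mobilevitv2_original_to_hf.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k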
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
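

# Minimal usage sketch (BioGptForCausalLM is assumed importable alongside the config):
#
#   from transformers import BioGptConfig, BioGptForCausalLM
#
#   config = BioGptConfig(num_hidden_layers=12)  # override any default above
#   model = BioGptForCausalLM(config)            # randomly initialized model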
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = DeiTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def _A ( ):
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
_lowercase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_lowercase , return_tensors="pt" ).to(_lowercase )
# forward pass
with torch.no_grad():
lowercase__ = model(**_lowercase )
# verify the logits
lowercase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowercase )
lowercase__ = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
lowercase__ = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_lowercase , return_tensors="pt" )
lowercase__ = inputs.pixel_values.to(_lowercase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ = model(_lowercase )
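            # No assertion follows on purpose: reaching this point without an
            # exception is the whole check for this fp16 inference smoke test.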
| 611 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1024 ):
lowercase__ , lowercase__ = [], []
lowercase__ = list(zip(__magic_name__ , __magic_name__ ) )
lowercase__ , lowercase__ = sorted_examples[0]
def is_too_big(__magic_name__ ):
return tok(__magic_name__ , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
lowercase__ = new_src + " " + src
lowercase__ = new_tgt + " " + tgt
        if is_too_big(__magic_name__ ) or is_too_big(__magic_name__ ): # can't fit, finalize example
finished_src.append(__magic_name__ )
finished_tgt.append(__magic_name__ )
lowercase__ , lowercase__ = src, tgt
else: # can fit, keep adding
lowercase__ , lowercase__ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(__magic_name__ )
finished_tgt.append(__magic_name__ )
return finished_src, finished_tgt
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = Path(__magic_name__ )
save_path.mkdir(exist_ok=__magic_name__ )
for split in ["train"]:
lowercase__ , lowercase__ = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
lowercase__ = [x.rstrip() for x in Path(__magic_name__ ).open().readlines()]
lowercase__ = [x.rstrip() for x in Path(__magic_name__ ).open().readlines()]
lowercase__ , lowercase__ = pack_examples(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
print(f'''packed {split} split from {len(__magic_name__ )} examples -> {len(__magic_name__ )}.''' )
Path(save_path / f'''{split}.source''' ).open("w" ).write("\n".join(__magic_name__ ) )
Path(save_path / f'''{split}.target''' ).open("w" ).write("\n".join(__magic_name__ ) )
for split in ["val", "test"]:
lowercase__ , lowercase__ = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
shutil.copyfile(__magic_name__ , save_path / f'''{split}.source''' )
shutil.copyfile(__magic_name__ , save_path / f'''{split}.target''' )
def _A ( ):
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=__magic_name__ , default=128 )
parser.add_argument("--data_dir" , type=__magic_name__ )
parser.add_argument("--save_path" , type=__magic_name__ )
lowercase__ = parser.parse_args()
lowercase__ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__magic_name__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
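# Hedged sketch (not part of the original script): the same greedy packing
# idea on toy data, with a stand-in "tokenizer" that counts whitespace-split
# words instead of subword tokens. All names below are hypothetical.
def _toy_pack(srcs, tgts, max_tokens=4):
    packed_src, packed_tgt = [], []
    cur_src, cur_tgt = srcs[0], tgts[0]
    for src, tgt in zip(srcs[1:], tgts[1:]):
        cand_src, cand_tgt = cur_src + " " + src, cur_tgt + " " + tgt
        if len(cand_src.split()) > max_tokens or len(cand_tgt.split()) > max_tokens:
            packed_src.append(cur_src)  # candidate too long: flush current pair
            packed_tgt.append(cur_tgt)
            cur_src, cur_tgt = src, tgt
        else:
            cur_src, cur_tgt = cand_src, cand_tgt
    packed_src.append(cur_src)  # flush the final, still-open pair
    packed_tgt.append(cur_tgt)
    return packed_src, packed_tgt
# _toy_pack(["a b", "c", "d e f"], ["x", "y", "z"]) == (["a b c", "d e f"], ["x y", "z"])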
if __name__ == "__main__":
packer_cli()
| 611 | 1 |
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : List[Any] = order
# a_{0} ... a_{k}
UpperCamelCase : Tuple = [1.0] + [0.0] * order
# b_{0} ... b_{k}
UpperCamelCase : List[Any] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
UpperCamelCase : Optional[int] = [0.0] * self.order
# y[n-1] ... y[n-k]
UpperCamelCase : Optional[int] = [0.0] * self.order
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
if len(SCREAMING_SNAKE_CASE_ ) < self.order:
UpperCamelCase : List[Any] = [1.0, *a_coeffs]
if len(SCREAMING_SNAKE_CASE_ ) != self.order + 1:
UpperCamelCase : Any = (
F"""Expected a_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE_ )}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) != self.order + 1:
UpperCamelCase : Optional[Any] = (
F"""Expected b_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE_ )}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = a_coeffs
UpperCamelCase : List[str] = b_coeffs
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> float:
UpperCamelCase : Optional[Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1, self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
UpperCamelCase : List[Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
UpperCamelCase : Tuple = self.input_history[:-1]
UpperCamelCase : Optional[Any] = self.output_history[:-1]
UpperCamelCase : Optional[int] = sample
UpperCamelCase : Dict = result
return result
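# Hedged sketch (not part of the original class): one step of the same
# Direct Form I difference equation,
#   y[n] = (b0 * x[n] + sum_i(b_i * x[n - i] - a_i * y[n - i])) / a0,
# rewritten as a free function so the arithmetic above is easy to check.
# All names below are hypothetical.
def _iir_step(a_coeffs, b_coeffs, x_hist, y_hist, sample):
    acc = 0.0
    for i in range(1, len(a_coeffs)):
        acc += b_coeffs[i] * x_hist[i - 1] - a_coeffs[i] * y_hist[i - 1]
    return (acc + b_coeffs[0] * sample) / a_coeffs[0]
# With identity coefficients the filter is a pass-through:
# _iir_step([1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0], [0.0, 0.0], 0.5) == 0.5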
| 40 |
from math import factorial
class __lowercase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
A : Union[str, Any] = real
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
A : str = [1] * rank
else:
A : Dict = rank
def __repr__( self ) -> List[str]:
return (
f'{self.real}+'
f'{"+".join(str(__UpperCAmelCase )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def snake_case ( self ) -> Union[str, Any]:
A : List[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , __UpperCAmelCase )
def __add__( self , __UpperCAmelCase ) -> Any:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return Dual(self.real + other , self.duals )
A : int = self.duals.copy()
A : Any = other.duals.copy()
if len(__UpperCAmelCase ) > len(__UpperCAmelCase ):
o_dual.extend([1] * (len(__UpperCAmelCase ) - len(__UpperCAmelCase )) )
elif len(__UpperCAmelCase ) < len(__UpperCAmelCase ):
s_dual.extend([1] * (len(__UpperCAmelCase ) - len(__UpperCAmelCase )) )
A : List[str] = []
for i in range(len(__UpperCAmelCase ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , __UpperCAmelCase )
UpperCAmelCase_ : int = __add__
def __sub__( self , __UpperCAmelCase ) -> List[str]:
return self + other * -1
def __mul__( self , __UpperCAmelCase ) -> List[str]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
A : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , __UpperCAmelCase )
A : Optional[Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , __UpperCAmelCase )
UpperCAmelCase_ : int = __mul__
def __truediv__( self , __UpperCAmelCase ) -> Optional[int]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
A : Optional[Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , __UpperCAmelCase )
raise ValueError
def __floordiv__( self , __UpperCAmelCase ) -> Union[str, Any]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
A : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , __UpperCAmelCase )
raise ValueError
def __pow__( self , __UpperCAmelCase ) -> Any:
if n < 0 or isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise ValueError('''power must be a positive integer''' )
if n == 0:
return 1
if n == 1:
return self
A : Dict = self
for _ in range(n - 1 ):
x *= self
return x
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
if not callable(lowerCamelCase_ ):
raise ValueError('''differentiate() requires a function as input for func''' )
if not isinstance(lowerCamelCase_ , (float, int) ):
raise ValueError('''differentiate() requires a float as input for position''' )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('''differentiate() requires an int as input for order''' )
A : Any = Dual(lowerCamelCase_ , 1 )
A : Dict = func(lowerCamelCase_ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def snake_case__ ( lowerCamelCase_ ):
    return lowerCamelCase_**2 * lowerCamelCase_**4
print(differentiate(f, 9, 2))
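# Hedged sanity check for the demo above: f(y) = y**2 * y**4 = y**6, so its
# second derivative is 30 * y**4 and the printed value should be
# 30 * 9**4 = 196830.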
| 542 | 0 |
'''simple docstring'''
lowercase = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
lowercase = {value: key for key, value in encode_dict.items()}
def __A ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def __A ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if set(UpperCAmelCase__ ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
__SCREAMING_SNAKE_CASE : List[Any] = ""
for word in coded.split():
while len(UpperCAmelCase__ ) != 0:
decoded += decode_dict[word[:5]]
__SCREAMING_SNAKE_CASE : Union[str, Any] = word[5:]
decoded += " "
return decoded.strip()
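# Hedged round-trip sketch: the two functions above were both renamed to
# __A by the obfuscation (the later definition shadows the earlier one),
# so this restates the same encode/decode logic with hypothetical names.
def _bacon_roundtrip(word, enc, dec):
    coded = "".join(enc[ch] for ch in word.lower())
    plain = ""
    for chunk in coded.split():
        while chunk:
            plain += dec[chunk[:5]]
            chunk = chunk[5:]
        plain += " "
    return plain.strip()
# _bacon_roundtrip("ab", {"a": "AAAAA", "b": "AAAAB"}, {"AAAAA": "a", "AAAAB": "b"}) == "ab"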
if __name__ == "__main__":
from doctest import testmod
testmod()
| 715 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Dict = IFInpaintingSuperResolutionPipeline
snake_case__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
snake_case__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
snake_case__ : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
def a_ ( self ):
return self._get_superresolution_dummy_components()
def a_ ( self , a__ , a__=0 ):
if str(a__ ).startswith("mps" ):
__SCREAMING_SNAKE_CASE : int = torch.manual_seed(a__ )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(a__ ) ).to(a__ )
__SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def a_ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def a_ ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def a_ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def a_ ( self ):
self._test_save_load_local()
def a_ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 564 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> bool:
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
raise ValueError('check_bouncy() accepts only integer arguments')
UpperCamelCase__ : Any = str(lowerCamelCase_)
UpperCamelCase__ : Tuple = ''.join(sorted(lowerCamelCase_))
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ = 99) -> int:
if not 0 < percent < 100:
raise ValueError('solution() only accepts values from 0 to 100')
UpperCamelCase__ : Dict = 0
UpperCamelCase__ : Optional[int] = 1
while True:
if check_bouncy(lowerCamelCase_):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
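    # Hedged sanity notes: 101 is the smallest bouncy number; per the Project
    # Euler 112 statement the bouncy proportion first reaches 50% at 538 and
    # 90% at 21780, and solution(99) above is expected to print 1587000.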
| 596 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-1'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-2'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-3'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-4'
class __lowercase (__lowerCamelCase ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_ : StableDiffusionSafetyChecker , UpperCAmelCase_ : CLIPImageProcessor , UpperCAmelCase_ : bool = True , ):
        super().__init__()
UpperCamelCase__ : int = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = StableDiffusionPipeline(
vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , requires_safety_checker=UpperCAmelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea)
@property
def __UpperCamelCase ( self : Optional[Any]):
return {k: getattr(self , UpperCAmelCase_) for k in self.config.keys() if not k.startswith('_')}
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[Union[str, int]] = "auto"):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_)
def __UpperCamelCase ( self : Any):
self.enable_attention_slicing(UpperCAmelCase_)
@torch.no_grad()
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Optional[int] , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Tuple , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : str , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Dict , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Tuple , ):
UpperCamelCase__ : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(UpperCAmelCase_)
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.')
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase__ : Dict = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # Get second result from Stable Diffusion Checkpoint v1.2
UpperCamelCase__ : Optional[Any] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # Get third result from Stable Diffusion Checkpoint v1.3
UpperCamelCase__ : Optional[Any] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # Get fourth result from Stable Diffusion Checkpoint v1.4
UpperCamelCase__ : List[str] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
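# Hedged usage sketch (assumes the standard diffusers community-pipeline
# loading path and that this file is registered as
# "stable_diffusion_comparison"; both are assumptions, not taken from this
# file):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   images = pipe(prompt="an astronaut riding a horse").images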
| 596 | 1 |
"""simple docstring"""
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->int:
UpperCAmelCase__ = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase__ = 1
for n in range(m + 1 ):
for k in range(1 , _SCREAMING_SNAKE_CASE ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
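# Hedged worked example: partition(5) counts the integer partitions of 5
# (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1), so it should return 7.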
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
a : Dict = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
a : int = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 422 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : Tuple = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
__lowercase : int = 'roformer'
def __init__( self , __lowercase=50000 , __lowercase=None , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1536 , __lowercase=2 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=0 , __lowercase=False , __lowercase=True , **__lowercase , ):
super().__init__(pad_token_id=__lowercase , **__lowercase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size if embedding_size is None else embedding_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = rotary_value
UpperCAmelCase__ = use_cache
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase__ = {0: """batch""", 1: """sequence"""}
UpperCAmelCase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 422 | 1 |
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self : int ):
'''simple docstring'''
__a = (0, 0)
__a = None
__a = 0
__a = 0
__a = 0
def __eq__( self : Union[str, Any] , __lowercase : Tuple ):
'''simple docstring'''
return self.position == cell.position
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
print(self.position )
class SCREAMING_SNAKE_CASE :
def __init__( self : str , __lowercase : str=(5, 5) ):
'''simple docstring'''
__a = np.zeros(__lowercase )
__a = world_size[0]
__a = world_size[1]
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
print(self.w )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Tuple ):
'''simple docstring'''
__a = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__a = cell.position[0]
__a = cell.position[1]
__a = []
for n in neughbour_cord:
__a = current_x + n[0]
__a = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__a = Cell()
__a = (x, y)
__a = cell
neighbours.append(__lowercase )
return neighbours
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = []
__a = []
_open.append(_SCREAMING_SNAKE_CASE )
while _open:
__a = np.argmin([n.f for n in _open] )
__a = _open[min_f]
_closed.append(_open.pop(_SCREAMING_SNAKE_CASE ) )
if current == goal:
break
for n in world.get_neigbours(_SCREAMING_SNAKE_CASE ):
for c in _closed:
if c == n:
continue
__a = current.g + 1
__a , __a = n.position
__a , __a = goal.position
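            # squared Euclidean distance to the goal is used as the heuristic h(n)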
__a = (ya - ya) ** 2 + (xa - xa) ** 2
__a = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_SCREAMING_SNAKE_CASE )
__a = []
while current.parent is not None:
path.append(current.position )
__a = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
lowerCamelCase__ = Gridworld()
# Start position and goal
lowerCamelCase__ = Cell()
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = Cell()
lowerCamelCase__ = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
lowerCamelCase__ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowerCamelCase__ = 1
print(world.w)
| 225 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict=True ):
"""simple docstring"""
model.train()
__a = model(_SCREAMING_SNAKE_CASE )
__a = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=False ):
"""simple docstring"""
set_seed(42 )
__a = RegressionModel()
__a = deepcopy(_SCREAMING_SNAKE_CASE )
__a = RegressionDataset(length=80 )
__a = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=16 )
model.to(accelerator.device )
if sched:
__a = AdamW(params=model.parameters() , lr=1e-3 )
__a = AdamW(params=ddp_model.parameters() , lr=1e-3 )
__a = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.65 )
__a = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.65 )
# Make a copy of `model`
if sched:
__a , __a , __a , __a = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a , __a = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
__a , __a , __a = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__a , __a = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__a = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
__a , __a , __a = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__a , __a = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__a = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : int=False ):
"""simple docstring"""
__a = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__a , __a , __a = get_training_setup(_SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__a , __a = batch.values()
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__a = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any]=False , _SCREAMING_SNAKE_CASE : Tuple=False ):
"""simple docstring"""
__a = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__a , __a , __a , __a , __a , __a , __a = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__a , __a = batch.values()
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
__a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Accelerator()
__a = RegressionDataset(length=80 )
__a = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=16 )
__a = RegressionDataset(length=96 )
__a = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=16 )
__a , __a = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if iteration < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Accelerator()
__a = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 225 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class lowercase__ :
def __init__( self ):
lowerCAmelCase_ : Optional[Any] = {}
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=1 ):
if self.graph.get(_lowercase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCAmelCase_ : Tuple = [[w, v]]
if not self.graph.get(_lowercase ):
lowerCAmelCase_ : Any = []
def UpperCAmelCase__ ( self ):
return list(self.graph )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ):
if self.graph.get(_lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=-2 , _lowercase=-1 ):
if s == d:
return []
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : Any = []
if s == -2:
lowerCAmelCase_ : Dict = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
lowerCAmelCase_ : str = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowercase ) != 0:
lowerCAmelCase_ : Optional[int] = stack[len(_lowercase ) - 1]
else:
lowerCAmelCase_ : Optional[int] = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return visited
def UpperCAmelCase__ ( self , _lowercase=-1 ):
if c == -1:
lowerCAmelCase_ : Dict = floor(random() * 10_000 ) + 10
for i in range(_lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCAmelCase_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowercase , _lowercase , 1 )
def UpperCAmelCase__ ( self , _lowercase=-2 ):
lowerCAmelCase_ : List[Any] = deque()
lowerCAmelCase_ : Optional[int] = []
if s == -2:
lowerCAmelCase_ : Union[str, Any] = list(self.graph )[0]
d.append(_lowercase )
visited.append(_lowercase )
while d:
lowerCAmelCase_ : Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self , _lowercase ):
lowerCAmelCase_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase__ ( self , _lowercase ):
return len(self.graph[u] )
def UpperCAmelCase__ ( self , _lowercase=-2 ):
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : Any = []
if s == -2:
lowerCAmelCase_ : List[str] = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
lowerCAmelCase_ : Optional[int] = s
lowerCAmelCase_ : Optional[Any] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ : Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_lowercase ) != 0:
lowerCAmelCase_ : List[str] = stack[len(_lowercase ) - 1]
else:
lowerCAmelCase_ : Dict = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return sorted_nodes
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Any = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
lowerCAmelCase_ : Tuple = -2
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : Any = s
lowerCAmelCase_ : int = False
lowerCAmelCase_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase_ : List[Any] = len(_lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ : Tuple = True
if len(_lowercase ) != 0:
lowerCAmelCase_ : Optional[int] = stack[len(_lowercase ) - 1]
else:
lowerCAmelCase_ : int = False
indirect_parents.append(_lowercase )
lowerCAmelCase_ : Optional[int] = s
lowerCAmelCase_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return list(_lowercase )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Dict = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
lowerCAmelCase_ : Optional[Any] = -2
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : Union[str, Any] = s
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase_ : Optional[Any] = len(_lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ : Dict = True
if len(_lowercase ) != 0:
lowerCAmelCase_ : Any = stack[len(_lowercase ) - 1]
else:
lowerCAmelCase_ : Any = False
indirect_parents.append(_lowercase )
lowerCAmelCase_ : Dict = s
lowerCAmelCase_ : Optional[int] = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return False
def UpperCAmelCase__ ( self , _lowercase=-2 , _lowercase=-1 ):
lowerCAmelCase_ : Optional[Any] = time()
self.dfs(_lowercase , _lowercase )
lowerCAmelCase_ : Tuple = time()
return end - begin
def UpperCAmelCase__ ( self , _lowercase=-2 ):
lowerCAmelCase_ : Any = time()
self.bfs(_lowercase )
lowerCAmelCase_ : str = time()
return end - begin
class lowercase__ :
def __init__( self ):
lowerCAmelCase_ : Optional[Any] = {}
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=1 ):
        # check if vertex u exists
if self.graph.get(_lowercase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCAmelCase_ : int = [[w, v]]
# add the other way
if self.graph.get(_lowercase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
lowerCAmelCase_ : List[str] = [[w, u]]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ):
if self.graph.get(_lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowercase )
# the other way round
if self.graph.get(_lowercase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=-2 , _lowercase=-1 ):
if s == d:
return []
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Tuple = []
if s == -2:
lowerCAmelCase_ : Dict = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
lowerCAmelCase_ : Any = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ : int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowercase ) != 0:
lowerCAmelCase_ : Tuple = stack[len(_lowercase ) - 1]
else:
lowerCAmelCase_ : Tuple = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return visited
def UpperCAmelCase__ ( self , _lowercase=-1 ):
if c == -1:
lowerCAmelCase_ : int = floor(random() * 10_000 ) + 10
for i in range(_lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCAmelCase_ : Tuple = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowercase , _lowercase , 1 )
def UpperCAmelCase__ ( self , _lowercase=-2 ):
lowerCAmelCase_ : str = deque()
lowerCAmelCase_ : List[str] = []
if s == -2:
lowerCAmelCase_ : Dict = list(self.graph )[0]
d.append(_lowercase )
visited.append(_lowercase )
while d:
lowerCAmelCase_ : Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self , _lowercase ):
return len(self.graph[u] )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : List[str] = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
lowerCAmelCase_ : List[Any] = -2
lowerCAmelCase_ : Dict = []
lowerCAmelCase_ : int = s
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase_ : str = len(_lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ : Dict = True
if len(_lowercase ) != 0:
lowerCAmelCase_ : int = stack[len(_lowercase ) - 1]
else:
lowerCAmelCase_ : Optional[int] = False
indirect_parents.append(_lowercase )
lowerCAmelCase_ : Optional[int] = s
lowerCAmelCase_ : List[str] = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return list(_lowercase )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
lowerCAmelCase_ : str = -2
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : Tuple = s
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : Tuple = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase_ : List[Any] = len(_lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ : Any = True
if len(_lowercase ) != 0:
lowerCAmelCase_ : Optional[int] = stack[len(_lowercase ) - 1]
else:
lowerCAmelCase_ : List[Any] = False
indirect_parents.append(_lowercase )
lowerCAmelCase_ : Optional[int] = s
lowerCAmelCase_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return False
def UpperCAmelCase__ ( self ):
return list(self.graph )
def UpperCAmelCase__ ( self , _lowercase=-2 , _lowercase=-1 ):
lowerCAmelCase_ : Union[str, Any] = time()
self.dfs(_lowercase , _lowercase )
lowerCAmelCase_ : Union[str, Any] = time()
return end - begin
def UpperCAmelCase__ ( self , _lowercase=-2 ):
lowerCAmelCase_ : Tuple = time()
self.bfs(_lowercase )
lowerCAmelCase_ : List[str] = time()
return end - begin
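# Hedged sketch (separate from the classes above, whose obfuscated names
# collide): the same iterative DFS idea on a plain adjacency dict, with
# hypothetical names.
def _dfs_path(graph, start, goal):
    stack, visited = [start], [start]
    while stack:
        node = stack[-1]
        for nxt in graph.get(node, []):
            if nxt not in visited:
                visited.append(nxt)
                if nxt == goal:
                    return visited
                stack.append(nxt)
                break
        else:  # every neighbour of node already visited: backtrack
            stack.pop()
    return visited
# _dfs_path({1: [2], 2: [3], 3: []}, 1, 3) == [1, 2, 3]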
| 440 |
UpperCAmelCase_ : str = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCAmelCase ( _a : str ) -> int:
lowerCAmelCase_ : Any = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
lowerCAmelCase_ : Stack[int] = Stack()
lowerCAmelCase_ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_a ) )
elif i in operators:
# RULE 2
operator_stack.push(_a )
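        # RULE 3: "(" (and any other character, e.g. whitespace) matches no
        # branch and is skipped implicitly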
elif i == ")":
# RULE 4
lowerCAmelCase_ : Optional[int] = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase_ : Union[str, Any] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase_ : List[str] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase_ : Dict = operators[opr](_a , _a )
operand_stack.push(_a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
UpperCAmelCase_ : Dict = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 440 | 1 |
"""simple docstring"""
def _snake_case ( _snake_case : int = 10_00 ) -> int:
'''simple docstring'''
_A = 2**power
_A = str(_snake_case )
_A = list(_snake_case )
_A = 0
for i in list_num:
sum_of_num += int(_snake_case )
return sum_of_num
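# Hedged worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so
# solution(15) should return 26.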
if __name__ == "__main__":
a = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
a = solution(power)
print('''Sum of the digits is: ''', result)
| 7 |
"""simple docstring"""
import argparse
a = '''docs/source/_static/js/custom.js'''
def _snake_case ( _snake_case : Dict ) -> Any:
'''simple docstring'''
with open(_snake_case , encoding='utf-8' , newline='\n' ) as f:
_A = f.readlines()
_A = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
_A = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(_snake_case , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
a = parser.parse_args()
update_custom_js(args.version)
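# For reference (added; the JS content below is a hypothetical example): the
# script expects custom.js to contain lines shaped roughly like
#   const stableVersion = "v4.27.0"
#   const versionMapping = {
#       "": "v4.27.0",
#       ...
#   }
# so both while-loops can locate the version constant and the mapping block.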
| 7 | 1 |
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Count starting numbers below ``number_limit`` whose chain has exactly ``chain_length`` non-repeating terms."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
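# A worked example (added): starting from 69 the chain is
#   69 -> 363600 -> 1454 -> 169 -> 363601 -> 1454 (repeats),
# i.e. five non-repeating terms, so 69 contributes to solution(chain_length=5).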
| 199 |
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
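# Rule summary (added): __judge_point implements Conway's B3/S23 rules --
# a live cell survives with 2 or 3 live neighbours and dies otherwise, while a
# dead cell becomes alive with exactly 3 live neighbours. For example, the
# centre of
#   [[True,  True,  False],
#    [True,  False, False],
#    [False, False, False]]
# has 3 live neighbours, so __judge_point(False, ...) returns True.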
| 199 | 1 |
import argparse
import os

import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = strabool(args.class_cond)
_lowerCamelCase = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
_lowerCamelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCamelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_lowerCamelCase = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
_lowerCamelCase = None
_lowerCamelCase = con_pt_to_diffuser(args.unet_path, unet_config)
_lowerCamelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_lowerCamelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_lowerCamelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCamelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
_lowerCamelCase = CMStochasticIterativeScheduler(**scheduler_config)
_lowerCamelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
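# Example invocation (added; the file names are illustrative assumptions):
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model --class_cond True
# "imagenet64" in the file name selects IMAGENET_64_UNET_CONFIG, and the
# "cd" prefix selects the consistency-distillation scheduler config.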
| 114 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 114 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 705 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to output the converted VAE.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
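# Example invocation (added; file names are hypothetical):
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae.pt --dump_path ./vae-diffusers
# The resulting folder can then be loaded with
#   AutoencoderKL.from_pretrained("./vae-diffusers")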
| 232 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
UpperCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
UpperCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, references, predictions):
        """Return the fraction of predictions that are equivalent to their references."""
        n_correct = 0.0
        for i, j in zip(references, predictions):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(references)
        return {
            "accuracy": accuracy,
        }
| 75 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy pixel inputs for ONNX export using the given preprocessor."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
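# A minimal sketch (added) of the attribute_map aliasing above, assuming the
# public ImageGPTConfig class exported by transformers:
#   from transformers import ImageGPTConfig
#   config = ImageGPTConfig()
#   assert config.hidden_size == config.n_embd == 512
#   assert config.num_hidden_layers == config.n_layer == 24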
| 472 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for ``symbol`` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 324 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase : List[str] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase : Tuple = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase : str = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 324 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
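# A minimal usage sketch (added; downloading the pretrained files from the Hub
# is assumed to work):
#   from transformers import BigBirdTokenizerFast
#   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   ids = tok.build_inputs_with_special_tokens([10, 11])
#   # ids == [tok.cls_token_id, 10, 11, tok.sep_token_id]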
| 656 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 559 |
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sort ``my_list`` by distributing values into per-integer buckets."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
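# A worked example (added): for [4, 5, 3, 2, 1], min=1 and max=5, so
# int(5 - 1) + 1 = 5 buckets are created and each value i lands in
# buckets[i - 1]; concatenating the sorted buckets yields [1, 2, 3, 4, 5].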
| 559 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase__ :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]:
_lowerCamelCase : List[str] = parent
_lowerCamelCase : List[Any] = batch_size
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Tuple = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : List[str] = min_size
_lowerCamelCase : Tuple = max_size
_lowerCamelCase : str = num_labels
_lowerCamelCase : Any = hidden_dim
_lowerCamelCase : Dict = hidden_dim
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5
).float()
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long()
_lowerCamelCase : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : List[str] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_lowerCamelCase : Any = self.num_queries
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : int = [1, 1, 1, 1]
_lowerCamelCase : Any = self.num_channels
_lowerCamelCase : Optional[Any] = 64
_lowerCamelCase : str = 128
_lowerCamelCase : Optional[Any] = self.hidden_dim
_lowerCamelCase : Any = self.hidden_dim
_lowerCamelCase : List[Any] = self.hidden_dim
return config
def UpperCamelCase_ ( self) -> Any:
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : str = output.encoder_hidden_states
_lowerCamelCase : int = output.pixel_decoder_hidden_states
_lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths))
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths))
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]:
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str:
_lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
_lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)
comm_check_on_output(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = model(
pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE)
comm_check_on_output(SCREAMING_SNAKE_CASE)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class MaskaFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Optional[int] = MaskaFormerModelTester(self)
_lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Tuple:
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE)
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""")
def UpperCamelCase_ ( self) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""")
def UpperCamelCase_ ( self) -> Tuple:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""")
def UpperCamelCase_ ( self) -> List[Any]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""")
def UpperCamelCase_ ( self) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""")
def UpperCamelCase_ ( self) -> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def UpperCamelCase_ ( self) -> Optional[int]:
pass
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : str = [*signature.parameters.keys()]
_lowerCamelCase : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE)
@slow
def UpperCamelCase_ ( self) -> Optional[int]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE)
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Dict = (self.model_tester.min_size,) * 2
_lowerCamelCase : str = {
"""pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE),
"""mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE),
"""class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(),
}
_lowerCamelCase : List[str] = self.model_tester.get_config()
_lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.loss is not None)
def UpperCamelCase_ ( self) -> Tuple:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.attentions is not None)
def UpperCamelCase_ ( self) -> Optional[Any]:
if not self.model_tester.is_training:
return
_lowerCamelCase : Any = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.train()
_lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss
loss.backward()
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Any = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : int = True
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
model.train()
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
UpperCAmelCase = 1e-4
def prepare_img ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCamelCase_ ( self) -> Union[str, Any]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : str = self.default_image_processor
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384))
with torch.no_grad():
_lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
_lowerCamelCase : Any = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
_lowerCamelCase : Dict = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self) -> Any:
_lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval()
_lowerCamelCase : Optional[Any] = self.default_image_processor
_lowerCamelCase : Any = prepare_img()
_lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384))
with torch.no_grad():
_lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE)
# masks_queries_logits
_lowerCamelCase : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
_lowerCamelCase : Any = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
_lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
# class_queries_logits
_lowerCamelCase : List[str] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
_lowerCamelCase : Optional[Any] = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval()
_lowerCamelCase : str = self.default_image_processor
_lowerCamelCase : Tuple = image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , )
_lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]]
_lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]]
with torch.no_grad():
_lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.loss is not None)
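
For orientation, the high-level inference pattern these integration tests exercise, written as a short sketch (the checkpoint id comes from the tests above; class names follow this file's Maska* spelling of the Mask2Former API):

image_processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
model = MaskaFormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
inputs = image_processor(prepare_img() , return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)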
| 88 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : Optional[int] ={
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ChineseCLIPFeatureExtractor"""]
__lowerCAmelCase : List[Any] =["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple =[
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 359 | 0 |
from __future__ import annotations
def slowsort ( sequence , start = None , end = None ):
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
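    # illustrative usage (not in the original): slowsort sorts in place, so it is
    # called for its side effect rather than a return value; its runtime is
    # deliberately super-polynomial ("multiply and surrender"), so keep inputs tiny
    example = [5, 2, 4, 1, 3]
    slowsort(example)
    assert example == [1, 2, 3, 4, 5]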
| 71 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase_ :
@property
def UpperCamelCase_ ( self : Optional[int] ):
return self.get_dummy_input()
@property
def UpperCamelCase_ ( self : Dict ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ):
_UpperCamelCase = 4
_UpperCamelCase = 32
_UpperCamelCase = (32, 32)
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = (batch_size, num_channels) + sizes
_UpperCamelCase = randn_tensor(_A , generator=_A , device=_A )
_UpperCamelCase = {'''hidden_states''': hidden_states}
if include_temb:
_UpperCamelCase = 128
_UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A )
if include_res_hidden_states_tuple:
_UpperCamelCase = torch.manual_seed(1 )
_UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),)
if include_encoder_hidden_states:
_UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A )
if include_skip_sample:
_UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A )
return dummy_input
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
_UpperCamelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
unet_block.to(_A )
unet_block.eval()
with torch.no_grad():
_UpperCamelCase = unet_block(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
_UpperCamelCase = output[0, -1, -3:, -3:]
_UpperCamelCase = torch.tensor(_A ).to(_A )
assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
model.to(_A )
model.train()
_UpperCamelCase = model(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = randn_tensor(output.shape , device=_A )
_UpperCamelCase = torch.nn.functional.mse_loss(_A , _A )
loss.backward()
| 71 | 1 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config ( config_path , display=False):
    """simple docstring"""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan ( device , conf_path=None , ckpt_path=None):
    """simple docstring"""
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path , display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path , map_location='cpu')
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd , strict=False)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan ( x , model):
    """simple docstring"""
    z , _ , _ = model.encode(x)
    print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str ( string , reload=False):
    """simple docstring"""
    module , cls = string.rsplit('.' , 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module , package=None) , cls)
def instantiate_from_config ( config):
    """simple docstring"""
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config['target'])(**config.get('params' , {}))
def load_model_from_config ( config , sd , gpu=True , eval_mode=True):
    """simple docstring"""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model ( config , ckpt , gpu , eval_mode):
    """simple docstring"""
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='cpu')
        global_step = pl_sd['global_step']
        print(F"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=gpu , eval_mode=eval_mode)['model']
    return model, global_step
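
A usage sketch tying these helpers together (the paths and the dummy input are illustrative assumptions; guarded so it only runs when the checkpoint files actually exist):

import os
if os.path.exists('./model_checkpoints/vqgan_only.yaml'):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = load_vqgan(device)
    x = torch.randn(1 , 3 , 256 , 256).to(device)
    xrec = reconstruct_with_vqgan(x , model)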
| 517 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( a__) -> bool:
"""simple docstring"""
return len(set(a__)) == len(a__)
if __name__ == "__main__":
import doctest
doctest.testmod()
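    # illustrative alternative (not in the original): an early-exit variant that
    # avoids hashing every element of a long iterable up front
    def all_unique_early_exit(values) -> bool:
        seen = set()
        for v in values:
            if v in seen:
                return False
            seen.add(v)
        return True
    assert all_unique_early_exit([1, 2, 3]) and not all_unique_early_exit([1, 1])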
| 517 | 1 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase : int = get_logger(__name__)
class ExtractManager :
def __init__( self : str , _UpperCamelCase : Optional[str] = None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
SCREAMING_SNAKE_CASE = Extractor
def __snake_case( self : List[Any] , _UpperCamelCase : str ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
SCREAMING_SNAKE_CASE = os.path.abspath(_UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) )
def __snake_case( self : Any , _UpperCamelCase : str , _UpperCamelCase : bool ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ))
)
def __snake_case( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : bool = False ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.extractor.infer_extractor_format(_UpperCamelCase )
if not extractor_format:
return input_path
SCREAMING_SNAKE_CASE = self._get_output_path(_UpperCamelCase )
if self._do_extract(_UpperCamelCase , _UpperCamelCase ):
self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return output_path
class BaseExtractor ( ABC ):
@classmethod
@abstractmethod
def __snake_case( cls : str , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : Dict ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
...
class MagicNumberBaseExtractor ( BaseExtractor , ABC ):
    magic_numbers: List[bytes] = []
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) -> Tuple:
'''simple docstring'''
with open(_UpperCamelCase , "rb" ) as f:
return f.read(_UpperCamelCase )
@classmethod
def __snake_case( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
SCREAMING_SNAKE_CASE = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
SCREAMING_SNAKE_CASE = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class TarExtractor ( BaseExtractor ):
@classmethod
def __snake_case( cls : Dict , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : List[Any] ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(_UpperCamelCase )
@staticmethod
def __snake_case( _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
def resolved(_UpperCamelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_UpperCamelCase ) )
def badpath(_UpperCamelCase : str , _UpperCamelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_UpperCamelCase , _UpperCamelCase ) ).startswith(_UpperCamelCase )
def badlink(_UpperCamelCase : int , _UpperCamelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
SCREAMING_SNAKE_CASE = resolved(os.path.join(_UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_UpperCamelCase )
SCREAMING_SNAKE_CASE = resolved(_UpperCamelCase )
for finfo in members:
if badpath(finfo.name , _UpperCamelCase ):
logger.error(F"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(F"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(F"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) )
tar_file.close()
class GzipExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [B"""\x1F\x8B"""]
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
with gzip.open(_UpperCamelCase , "rb" ) as gzip_file:
with open(_UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class ZipExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def __snake_case( cls : Tuple , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCamelCase , "rb" ) as fp:
SCREAMING_SNAKE_CASE = _EndRecData(_UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
SCREAMING_SNAKE_CASE = fp.read(_UpperCamelCase ) # CD is where we expect it to be
if len(_UpperCamelCase ) == sizeCentralDir:
SCREAMING_SNAKE_CASE = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with zipfile.ZipFile(_UpperCamelCase , "r" ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
class XzExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
with lzma.open(_UpperCamelCase ) as compressed_file:
with open(_UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class RarExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""]  # RAR_ID # RAR5_ID
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
SCREAMING_SNAKE_CASE = rarfile.RarFile(_UpperCamelCase )
rf.extractall(_UpperCamelCase )
rf.close()
class ZstdExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
SCREAMING_SNAKE_CASE = zstd.ZstdDecompressor()
with open(_UpperCamelCase , "rb" ) as ifh, open(_UpperCamelCase , "wb" ) as ofh:
dctx.copy_stream(_UpperCamelCase , _UpperCamelCase )
class BzipaExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [B"""\x42\x5A\x68"""]
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
        with bz2.open(_UpperCamelCase , "rb" ) as compressed_file:
with open(_UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class SevenZipExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
        with py7zr.SevenZipFile(_UpperCamelCase , "r" ) as archive:
archive.extractall(_UpperCamelCase )
class LzaExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(_UpperCamelCase , "rb" ) as compressed_file:
with open(_UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class Extractor :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __snake_case( cls : Any ) -> List[str]:
'''simple docstring'''
return max(
len(_UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCamelCase , _UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __snake_case( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) -> str:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase )
except OSError:
return b""
@classmethod
def __snake_case( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bool = False ) -> bool:
'''simple docstring'''
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = cls.infer_extractor_format(_UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __snake_case( cls : Optional[Any] , _UpperCamelCase : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
SCREAMING_SNAKE_CASE = cls._get_magic_number_max_length()
SCREAMING_SNAKE_CASE = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return extractor_format
@classmethod
def __snake_case( cls : Optional[Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase )
# Prevent parallel extractions
SCREAMING_SNAKE_CASE = str(Path(_UpperCamelCase ).with_suffix(".lock" ) )
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = extractor if extractor != "deprecated" else extractor_format
else:
SCREAMING_SNAKE_CASE = cls.extractors[extractor_format]
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=_UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCamelCase ):
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
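
Every extractor above keys off a file's leading magic bytes. A minimal self-contained sketch of that sniffing idea (the magic table below is a small illustrative subset, not the library's full mapping):

from typing import Optional

def sniff_archive_format(path: str) -> Optional[str]:
    magics = {
        b"\x1f\x8b": "gzip",
        b"PK\x03\x04": "zip",
        b"BZh": "bz2",
        b"\xfd7zXZ\x00": "xz",
    }
    with open(path , "rb") as f:
        head = f.read(max(len(m) for m in magics))
    for magic, fmt in magics.items():
        if head.startswith(magic):
            return fmt
    return None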
| 647 |
def sylvester (number : int ):
    assert isinstance(number , int ), F"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = F"The input value of [n={number}] has to be > 0"
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
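    # illustrative check (not in the original): the sequence grows doubly
    # exponentially, each term being roughly the square of its predecessor
    assert [sylvester(n) for n in range(1, 5)] == [2, 3, 7, 43]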
| 647 | 1 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7] , scaling: bool = True , num_time_features: int = 0 , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , activation_function: str = "gelu" , dropout: float = 0.1 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache: bool = True , is_encoder_decoder=True , label_length: int = 10 , moving_average: int = 25 , autocorrelation_factor: int = 3 , **kwargs , ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
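    # worked illustration of _number_of_features (values are assumptions, not
    # defaults): with embedding_dimension=[2], num_dynamic_real_features=0,
    # num_time_features=1, num_static_real_features=0 and input_size=1, the
    # total is 2 + 0 + 1 + 0 + 1 * 2 = 5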
| 49 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_snake_case = StableUnCLIPPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_snake_case = False
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = 32
__lowerCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case_ , projection_dim=snake_case_ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case_ , num_layers=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_000 , clip_sample=snake_case_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
__lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=snake_case_ )
__lowerCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case_ , layers_per_block=1 , upcast_attention=snake_case_ , use_linear_projection=snake_case_ , )
torch.manual_seed(0 )
__lowerCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=snake_case_ , steps_offset=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL()
__lowerCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def A__ ( self , snake_case_ , snake_case_=0 ) -> Optional[Any]:
if str(snake_case_ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(snake_case_ )
else:
__lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> Tuple:
__lowerCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=snake_case_ )
def A__ ( self ) -> Tuple:
__lowerCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=snake_case_ )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe("""anime turle""" , generator=snake_case_ , output_type="""np""" )
__lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 465 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCAmelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: Optional[List[int]] = None , scaling: Optional[Union[str, bool]] = "mean" , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , num_time_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , is_encoder_decoder: bool = True , activation_function: str = "gelu" , dropout: float = 0.05 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache=True , attention_type: str = "prob" , sampling_factor: int = 5 , distil: bool = True , **kwargs , ):
        '''simple docstring'''
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
| 66 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def _lowerCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        image_processor = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('''nielsr/rvlcdip-demo''' )
        image = dataset['''train'''][0]['''image'''].convert('''RGB''' )
        inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
| 66 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest ( SchedulerCommonTest ):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
def snake_case_ ( self , **a__):
A__ = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**a__)
return config
def snake_case_ ( self , a__=0 , **a__):
A__ = dict(self.forward_default_kwargs)
A__ = kwargs.pop('''num_inference_steps''' , a__)
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**a__)
A__ = scheduler_class(**a__)
scheduler.set_timesteps(a__)
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__)
A__ = scheduler_class.from_pretrained(a__)
new_scheduler.set_timesteps(a__)
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(a__ , a__ , a__ , **a__).prev_sample
A__ = new_scheduler.step_prk(a__ , a__ , a__ , **a__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(a__ , a__ , a__ , **a__).prev_sample
A__ = new_scheduler.step_plms(a__ , a__ , a__ , **a__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self):
pass
def snake_case_ ( self , a__=0 , **a__):
A__ = dict(self.forward_default_kwargs)
A__ = kwargs.pop('''num_inference_steps''' , a__)
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**a__)
scheduler.set_timesteps(a__)
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__)
A__ = scheduler_class.from_pretrained(a__)
# copy over dummy past residuals
new_scheduler.set_timesteps(a__)
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(a__ , a__ , a__ , **a__).prev_sample
A__ = new_scheduler.step_prk(a__ , a__ , a__ , **a__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(a__ , a__ , a__ , **a__).prev_sample
A__ = new_scheduler.step_plms(a__ , a__ , a__ , **a__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **a__):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**a__)
A__ = scheduler_class(**a__)
A__ = 1_0
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(a__)
for i, t in enumerate(scheduler.prk_timesteps):
A__ = model(a__ , a__)
A__ = scheduler.step_prk(a__ , a__ , a__).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
A__ = model(a__ , a__)
A__ = scheduler.step_plms(a__ , a__ , a__).prev_sample
return sample
def snake_case_ ( self):
A__ = dict(self.forward_default_kwargs)
A__ = kwargs.pop('''num_inference_steps''' , a__)
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**a__)
A__ = self.dummy_sample
A__ = 0.1 * sample
if num_inference_steps is not None and hasattr(a__ , '''set_timesteps'''):
scheduler.set_timesteps(a__)
elif num_inference_steps is not None and not hasattr(a__ , '''set_timesteps'''):
A__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(a__ , 0 , a__ , **a__).prev_sample
A__ = scheduler.step_prk(a__ , 1 , a__ , **a__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
A__ = scheduler.step_plms(a__ , 0 , a__ , **a__).prev_sample
A__ = scheduler.step_plms(a__ , 1 , a__ , **a__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def snake_case_ ( self):
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=a__)
def snake_case_ ( self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a__)
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1)
A__ = scheduler_class(**a__)
scheduler.set_timesteps(1_0)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1]) , )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 632 |
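The pattern exercised by the tests above can be sketched as a standalone sampling loop. This is a minimal sketch assuming diffusers' public PNDMScheduler API; the zero tensor stands in for a real denoising model.

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    # step() dispatches to the Runge-Kutta (PRK) steps first, then the linear multistep (PLMS) steps
    sample = scheduler.step(residual, t, sample).prev_sample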
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 632 | 1 |
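A minimal usage sketch for the config above, assuming a transformers build that exports TimmBackboneConfig; the backbone name is just an illustrative timm identifier.

from transformers import TimmBackboneConfig  # assumption: exported in this transformers version

config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.backbone, config.features_only, config.out_indices)  # resnet50 True (1, 2, 3, 4)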
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stand-in used when the vision dependencies are unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4), [
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4), [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
    @require_torch
    @slow
    def test_integration_object_detection(self):
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4), [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993
        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
] , )
| 703 |
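For reference, the pipeline under test can be driven in a few lines; a sketch assuming network access to the DETR checkpoint used above.

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for det in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
    print(det["label"], round(det["score"], 3), det["box"])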
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 123 | 0 |
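A short sketch of how the special-token logic above behaves at the call site, assuming the google/pegasus-xsum checkpoint is reachable.

from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tok("Hello world")["input_ids"]
print(ids[-1] == tok.eos_token_id)  # True: a single sequence ends with exactly one </s>
mask = tok.get_special_tokens_mask(ids, already_has_special_tokens=True)
print(mask[-1])  # 1: the trailing eos is flagged as special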
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 75 |
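The integration test above boils down to this inference recipe; a sketch assuming the hustvl/yolos-small checkpoint and a local image file (the path is hypothetical).

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
image = Image.open("cats.png")  # hypothetical local file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# rescale boxes back to the original image size and keep confident detections
results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=[image.size[::-1]])[0]
print(results["labels"], results["scores"])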
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 3 | 0 |
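A hedged, minimal variant of the scraping idea above with proper error handling; the URL and selector are placeholders, and note that scraping Google results programmatically may violate its terms of service.

import requests
from bs4 import BeautifulSoup

res = requests.get("https://example.com", headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
res.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
soup = BeautifulSoup(res.text, "html.parser")
print([a.get("href") for a in soup.select("a")][:5])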
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 712 |
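The staleness thresholds in the script can be isolated into a pure function that is easy to unit-test; this sketch mirrors the 23-day / 30-day checks above.

from datetime import datetime

def is_stale(updated_at: datetime, created_at: datetime, now: datetime) -> bool:
    # mirrors the script: no activity for more than 23 days and issue at least 30 days old
    return (now - updated_at).days > 23 and (now - created_at).days >= 30

print(is_stale(datetime(2023, 1, 1), datetime(2022, 11, 1), datetime(2023, 2, 1)))  # True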
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 696 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 316 |
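A quick sketch of using the config above to select backbone stages; this assumes ResNetConfig is importable from transformers.

from transformers import ResNetConfig

config = ResNetConfig(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']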
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k); each prefix product is itself a binomial, so // stays exact
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees with `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees with `node_count` labeled nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
_UpperCamelCase : Dict =int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 316 | 1 |
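A standalone cross-check of the counts computed above, using the closed forms directly: 3 nodes give Catalan(3) = C(6,3)/4 = 5 binary search trees and 5 * 3! = 30 labeled binary trees.

from math import comb, factorial

assert comb(6, 3) // 4 == 5                  # Catalan(3): number of BST shapes
assert comb(6, 3) // 4 * factorial(3) == 30  # labeled binary trees with 3 nodes
print("ok")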
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves a1*x + b1*y = c1 and a2*x + b2*y = c2 with Cramer's rule.
    Each equation is given as [a, b, c].
    """
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 546 |
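Substituting the returned solution back into both equations is an easy sanity check for the formulas above; the sample system is hypothetical.

# For 2x + 3y = 8 and 4x - y = 2, Cramer's rule gives x = 1, y = 2.
a1, b1, c1 = 2, 3, 8
a2, b2, c2 = 4, -1, 2
det = a1 * b2 - a2 * b1
x = (c1 * b2 - c2 * b1) / det
y = (a1 * c2 - a2 * c1) / det
assert abs(a1 * x + b1 * y - c1) < 1e-9 and abs(a2 * x + b2 * y - c2) < 1e-9
print(x, y)  # 1.0 2.0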
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 546 | 1 |
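As the comment in the eval loop notes, newer accelerate versions fold the last-batch truncation into a single call; a minimal sketch assuming accelerate >= 0.12.

import torch

def evaluate_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics drops the samples duplicated for padding on the last batch automatically
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()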
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args):
    nlp = pipeline(task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device)
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column", type=str, help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)"
        )
        run_parser.add_argument(
            "--format", type=str, default="infer", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="Input format to read from"
        )
        run_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)"
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 232 |
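Wiring the command into an argparse-based CLI mirrors how transformers-cli registers it; a sketch reusing the names from the module above, with a hypothetical input file.

from argparse import ArgumentParser

parser = ArgumentParser("transformers-cli", usage="transformers-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="transformers-cli command helpers")
RunCommand.register_subcommand(subparsers)  # adds the "run" subcommand defined above
args = parser.parse_args(["run", "--task", "sentiment-analysis", "--input", "data.csv", "--format", "csv"])
service = args.func(args)  # run_command_factory builds the RunCommand
service.run()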
from __future__ import annotations
def all_unique(sequence: list) -> bool:
    """Return True if the sequence contains no duplicate elements.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(sequence)) == len(sequence)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 705 |
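The inputs_to_logits_ratio property above is just the product of the conv strides; a quick standalone check of the default configuration's downsampling factor.

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320: one logit frame per 320 input samples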
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
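    # Illustrative spot-check of the checkpoint regex defined above: it captures
    # (checkpoint name, hub link) tuples from markdown-style docstring links.
    print(_re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)"))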
| 696 | 0 |
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1-indexed).

    >>> catalan(1)
    1
    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    # C(n) = C(n - 1) * (4n - 2) / (n + 1), kept exact with integer arithmetic
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
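    # Illustrative spot-check: the first few Catalan numbers.
    print([catalan(n) for n in range(1, 7)])  # [1, 1, 2, 5, 14, 42]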
| 598 |
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
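
# Sketch of the idea behind `token2json` (not the library implementation):
# Donut emits XML-like tags such as `<s_name>John Doe</s_name>`, and the
# processor folds them into a (possibly nested) dict. A minimal converter for
# flat, non-nested fields could look like this:
#
#     import re
#
#     def flat_tags_to_dict(sequence: str) -> dict:
#         # only handles non-nested `<s_key>value</s_key>` pairs
#         return {k: v for k, v in re.findall(r"<s_(\w+)>(.*?)</s_\1>", sequence)}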
| 598 | 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each direction aims at the frontier of the other one
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
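    # Illustrative comparison of the two rankings: HEURISTIC = 1 sorts nodes by
    # Manhattan distance, which on a 4-connected grid is the tighter admissible
    # bound and usually expands fewer nodes than the Euclidean ranking used when
    # HEURISTIC = 0 (for the corner-to-corner query above, h(start) is 12 vs.
    # sqrt(72) ~ 8.49).
    print(f"AStar path: {path}")
    print(f"BidirectionalAStar path: {bd_path}")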
| 449 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/load round-trips are already exercised by check_over_configs above
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
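
# Minimal standalone sketch (illustrative, outside the test harness): the same
# denoising pattern as `full_loop` above, with a real model supplying `residual`.
#
#     from diffusers import DPMSolverSinglestepScheduler
#     scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         residual = unet(sample, t).sample  # `unet` and `sample` assumed given
#         sample = scheduler.step(residual, t, sample).prev_sample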
| 449 | 1 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
| 16 |
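# Equivalent direct use of jiwer for the WER metric above (illustrative):
#
#     from jiwer import compute_measures
#     measures = compute_measures("this is the reference", "this is the prediction")
#     measures["wer"]  # 0.25: one substitution over four reference words
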
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8_192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21_841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2_396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ], device=torch_device, )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
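
# Quick-start sketch outside the test harness (illustrative; the predicted
# label is just an example of what this checkpoint might return):
#
#     from transformers import pipeline
#     classifier = pipeline("image-classification", model="microsoft/beit-base-patch16-224")
#     classifier("cats.png")[0]  # e.g. {"label": "tabby, tabby cat", "score": ...}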
| 37 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    """Audio feature: stores audio either as raw bytes or as a path, and decodes to an array on access."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
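
# Usage sketch (illustrative): encoding a raw array and decoding it back.
#
#     feature = Audio(sampling_rate=16_000)
#     encoded = feature.encode_example({"array": [0.0, 0.1, -0.1], "sampling_rate": 16_000})
#     # encoded == {"bytes": b"RIFF...", "path": None}
#     decoded = feature.decode_example(encoded)
#     # decoded["array"] is a float array at decoded["sampling_rate"] == 16_000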
| 715 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.product))

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
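    # Example (illustrative): multiply A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x;
    # the product is 4 + 13x + 22x^2 + 15x^3 (up to floating-point rounding).
    print(FFT([1, 2, 3], [4, 5]))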
| 88 | 0 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig(PretrainedConfig):
    """Configuration class to store the configuration of a TAPAS model."""

    model_type = "tapas"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
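
# Usage sketch (illustrative), mirroring the WTQ-style aggregation setup:
#
#     config = TapasConfig(
#         num_aggregation_labels=4,
#         aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"},
#     )
#     config.aggregation_labels  # keys coerced to int by __init__: {0: "NONE", ...}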
| 12 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : int = (DDPMScheduler,)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**SCREAMING_SNAKE_CASE_)
return config
def lowercase__ ( self):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.dummy_model()
lowercase__ : List[Any] = self.dummy_sample_deter
lowercase__ : str = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : str = pred_prev_sample
lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""")
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter
lowercase__ : int = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : Tuple = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE_):
if i == len(SCREAMING_SNAKE_CASE_) - 1:
lowercase__ : Optional[int] = -1
else:
lowercase__ : Tuple = timesteps[i + 1]
lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_)
lowercase__ : int = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
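        # With custom timesteps [100, 87, 50, 1, 0], previous_timestep(t) should
        # return the next entry in the list, and -1 once the last step is reached.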
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = [1_00, 87, 50, 1, 0]
lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_)
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
| 12 | 1 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
snake_case_ : List[str] = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
snake_case_ : Optional[Any] = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def __a ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : Dict = SavedModel()
lowerCamelCase_ : Optional[int] = []
with open(os.path.join(__UpperCAmelCase , "utils" , "tf_ops" , "onnx.json" ) ) as f:
lowerCamelCase_ : str = json.load(__UpperCAmelCase )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__UpperCAmelCase )] )
with open(__UpperCAmelCase , "rb" ) as f:
saved_model.ParseFromString(f.read() )
lowerCamelCase_ : Optional[Any] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
lowerCamelCase_ : str = sorted(__UpperCAmelCase )
lowerCamelCase_ : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__UpperCAmelCase )
if strict and len(__UpperCAmelCase ) > 0:
raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops )
elif len(__UpperCAmelCase ) > 0:
print(f"Found the following incompatible ops for the opset {opset}:" )
print(*__UpperCAmelCase , sep="\n" )
else:
print(f"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
snake_case_ : Union[str, Any] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 713 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = "informer"
lowerCamelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Optional[Any] , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : str = "student_t" , __magic_name__ : str = "nll" , __magic_name__ : int = 1 , __magic_name__ : List[int] = None , __magic_name__ : Optional[Union[str, bool]] = "mean" , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : Optional[List[int]] = None , __magic_name__ : Optional[List[int]] = None , __magic_name__ : int = 64 , __magic_name__ : int = 32 , __magic_name__ : int = 32 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : bool = True , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.05 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 100 , __magic_name__ : float = 0.02 , __magic_name__ : Optional[int]=True , __magic_name__ : str = "prob" , __magic_name__ : int = 5 , __magic_name__ : bool = True , **__magic_name__ : Tuple , ) -> List[str]:
# time series specific configuration
lowerCamelCase_ : Tuple = prediction_length
lowerCamelCase_ : str = context_length or prediction_length
lowerCamelCase_ : Union[str, Any] = distribution_output
lowerCamelCase_ : List[str] = loss
lowerCamelCase_ : Tuple = input_size
lowerCamelCase_ : int = num_time_features
lowerCamelCase_ : List[str] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCamelCase_ : Optional[int] = scaling
lowerCamelCase_ : str = num_dynamic_real_features
lowerCamelCase_ : List[str] = num_static_real_features
lowerCamelCase_ : Any = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCamelCase_ : Dict = cardinality
else:
lowerCamelCase_ : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCamelCase_ : Dict = embedding_dimension
else:
lowerCamelCase_ : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase_ : Dict = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase_ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase_ : int = d_model
lowerCamelCase_ : Union[str, Any] = encoder_attention_heads
lowerCamelCase_ : int = decoder_attention_heads
lowerCamelCase_ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase_ : Union[str, Any] = decoder_ffn_dim
lowerCamelCase_ : Dict = encoder_layers
lowerCamelCase_ : str = decoder_layers
lowerCamelCase_ : Dict = dropout
lowerCamelCase_ : Optional[int] = attention_dropout
lowerCamelCase_ : Dict = activation_dropout
lowerCamelCase_ : List[Any] = encoder_layerdrop
lowerCamelCase_ : Optional[Any] = decoder_layerdrop
lowerCamelCase_ : Optional[int] = activation_function
lowerCamelCase_ : int = init_std
lowerCamelCase_ : str = use_cache
# Informer
lowerCamelCase_ : str = attention_type
lowerCamelCase_ : Union[str, Any] = sampling_factor
lowerCamelCase_ : List[Any] = distil
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
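        # With the constructor defaults (cardinality falls back to [0], so
        # sum(embedding_dimension) == 0, and all feature counts are 0 with
        # input_size=1), this property evaluates to 0 + 0 + 0 + 0 + 1 * 2 = 2,
        # making the encoder input width input_size * len(lags_sequence) + 2 = 9.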
| 253 | 0 |
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
_UpperCAmelCase : Union[str, Any] = get_logger(__name__)
class lowercase_ ( enum.Enum ):
"""simple docstring"""
__lowerCAmelCase = "all_checks"
__lowerCAmelCase = "basic_checks"
__lowerCAmelCase = "no_checks"
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[dict] , __snake_case : dict , __snake_case : str=None ):
if expected_checksums is None:
logger.info('Unable to verify checksums.' )
return
if len(set(__snake_case ) - set(__snake_case ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__snake_case ) - set(__snake_case ) ) )
if len(set(__snake_case ) - set(__snake_case ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__snake_case ) - set(__snake_case ) ) )
_A = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
_A = ' for ' + verification_name if verification_name is not None else ''
if len(__snake_case ) > 0:
raise NonMatchingChecksumError(
F'Checksums didn\'t match{for_verification_name}:\n'
F'{bad_urls}\n'
'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
logger.info('All the checksums matched successfully' + for_verification_name )
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[dict] , __snake_case : dict ):
if expected_splits is None:
logger.info('Unable to verify splits sizes.' )
return
if len(set(__snake_case ) - set(__snake_case ) ) > 0:
raise ExpectedMoreSplits(str(set(__snake_case ) - set(__snake_case ) ) )
if len(set(__snake_case ) - set(__snake_case ) ) > 0:
raise UnexpectedSplits(str(set(__snake_case ) - set(__snake_case ) ) )
_A = [
{'expected': expected_splits[name], 'recorded': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__snake_case ) > 0:
raise NonMatchingSplitsSizesError(str(__snake_case ) )
logger.info('All the splits matched successfully.' )
def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : bool = True ):
if record_checksum:
        _A = sha256()
with open(__snake_case , 'rb' ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) , B'' ):
m.update(__snake_case )
_A = m.hexdigest()
else:
_A = None
return {"num_bytes": os.path.getsize(__snake_case ), "checksum": checksum}
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 107 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase ( __a ):
_UpperCAmelCase = (DDPMScheduler,)
def UpperCamelCase ( self , **A__ ) -> str:
snake_case = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**A__ )
return config
def UpperCamelCase ( self ) -> Optional[Any]:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A__ )
def UpperCamelCase ( self ) -> str:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A__ )
def UpperCamelCase ( self ) -> Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A__ )
def UpperCamelCase ( self ) -> List[str]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A__ )
def UpperCamelCase ( self ) -> List[str]:
self.check_over_configs(thresholding=A__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A__ , prediction_type=A__ , sample_max_value=A__ , )
def UpperCamelCase ( self ) -> Dict:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def UpperCamelCase ( self ) -> List[Any]:
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=A__ )
def UpperCamelCase ( self ) -> Tuple:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**A__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**A__ )
snake_case = len(A__ )
snake_case = self.dummy_model()
snake_case = self.dummy_sample_deter
snake_case = torch.manual_seed(0 )
for t in reversed(range(A__ ) ):
# 1. predict noise residual
snake_case = model(A__ , A__ )
# 2. predict previous mean of sample x_t-1
snake_case = scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case = pred_prev_sample
snake_case = torch.sum(torch.abs(A__ ) )
snake_case = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCamelCase ( self ) -> Any:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case = scheduler_class(**A__ )
snake_case = len(A__ )
snake_case = self.dummy_model()
snake_case = self.dummy_sample_deter
snake_case = torch.manual_seed(0 )
for t in reversed(range(A__ ) ):
# 1. predict noise residual
snake_case = model(A__ , A__ )
# 2. predict previous mean of sample x_t-1
snake_case = scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case = pred_prev_sample
snake_case = torch.sum(torch.abs(A__ ) )
snake_case = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCamelCase ( self ) -> int:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**A__ )
snake_case = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A__ )
snake_case = scheduler.timesteps
for i, timestep in enumerate(A__ ):
if i == len(A__ ) - 1:
snake_case = -1
else:
snake_case = timesteps[i + 1]
snake_case = scheduler.previous_timestep(A__ )
snake_case = prev_t.item()
self.assertEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Dict:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**A__ )
snake_case = [1_00, 87, 50, 51, 0]
with self.assertRaises(A__ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=A__ )
def UpperCamelCase ( self ) -> Dict:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**A__ )
snake_case = [1_00, 87, 50, 1, 0]
snake_case = len(A__ )
with self.assertRaises(A__ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=A__ , timesteps=A__ )
def UpperCamelCase ( self ) -> Tuple:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**A__ )
snake_case = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            A__ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=A__ )
| 342 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: List[str] = len(UpperCamelCase__ ) + 1
_lowercase: Dict = len(UpperCamelCase__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_lowercase: Any = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
# since string of zero length match pattern of zero length
_lowercase: Optional[int] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , UpperCamelCase__ ):
_lowercase: List[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , UpperCamelCase__ ):
_lowercase: Dict = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , UpperCamelCase__ ):
for j in range(1 , UpperCamelCase__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_lowercase: Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_lowercase: Tuple = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_lowercase: Optional[Any] = dp[i - 1][j]
else:
_lowercase: List[str] = 0
else:
_lowercase: str = 0
return bool(dp[-1][-1] )
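# Worked illustration (inputs assumed, matching the demo below): matching "aab"
# against "c*a*b". dp[0][0] = 1 since empty matches empty; "c*" may match zero
# characters, so dp[0][2] = 1, and "c*a*" likewise gives dp[0][4] = 1. Each 'a'
# of "aab" is then absorbed by "a*" (dp[1][4] = dp[2][4] = 1), the final 'b'
# matches literally, and dp[3][5] = 1, so the function returns True.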
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
A__ : Optional[Any] = "aab"
A__ : List[Any] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 703 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number | (1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number & ~(1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number ^ (1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __A ( UpperCamelCase__ ):
UpperCamelCase = """"""
UpperCamelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
UpperCamelCase = None # compression type in fsspec. ex: "gzip"
UpperCamelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self :Any , __snake_case :str = "" , __snake_case :Optional[str] = None , __snake_case :Optional[dict] = None , **__snake_case :Dict ):
'''simple docstring'''
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__magic_name__ : Any =fsspec.open(
__snake_case , mode="""rb""" , protocol=__snake_case , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__magic_name__ : Union[str, Any] =os.path.basename(self.file.path.split("""::""" )[0] )
__magic_name__ : Optional[int] =(
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
__magic_name__ : Optional[Any] =None
@classmethod
def A__ ( cls :Tuple , __snake_case :str ):
'''simple docstring'''
return super()._strip_protocol(__snake_case ).lstrip("""/""" )
def A__ ( self :Any ):
'''simple docstring'''
if self.dir_cache is None:
__magic_name__ : Optional[Any] ={**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
__magic_name__ : Optional[Any] ={f["""name"""]: f}
def A__ ( self :Any , __snake_case :str ):
'''simple docstring'''
return self.file.open().read()
def A__ ( self :List[Any] , __snake_case :str , __snake_case :str = "rb" , __snake_case :str=None , __snake_case :List[Any]=True , __snake_case :Any=None , **__snake_case :Any , ):
'''simple docstring'''
__magic_name__ : Tuple =self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class __A ( UpperCamelCase__ ):
UpperCamelCase = """bz2"""
UpperCamelCase = """bz2"""
UpperCamelCase = """.bz2"""
class __A ( UpperCamelCase__ ):
UpperCamelCase = """gzip"""
UpperCamelCase = """gzip"""
UpperCamelCase = """.gz"""
class __A ( UpperCamelCase__ ):
UpperCamelCase = """lz4"""
UpperCamelCase = """lz4"""
UpperCamelCase = """.lz4"""
class __A ( UpperCamelCase__ ):
UpperCamelCase = """xz"""
UpperCamelCase = """xz"""
UpperCamelCase = """.xz"""
class __A ( UpperCamelCase__ ):
UpperCamelCase = """zstd"""
UpperCamelCase = """zstd"""
UpperCamelCase = """.zst"""
def __init__( self :Optional[Any] , __snake_case :str , __snake_case :str = "rb" , __snake_case :Optional[str] = None , __snake_case :Optional[dict] = None , __snake_case :int = DEFAULT_BLOCK_SIZE , **__snake_case :List[Any] , ):
'''simple docstring'''
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__magic_name__ : Dict =self.file.__enter__
class __A :
def __init__( self :List[Any] , __snake_case :List[Any] ):
'''simple docstring'''
__magic_name__ : int =file_
def __enter__( self :Dict ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self :Dict , *__snake_case :str , **__snake_case :Any ):
'''simple docstring'''
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self :Tuple ):
'''simple docstring'''
return iter(self._file )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return next(self._file )
def __getattr__( self :Any , __snake_case :Optional[Any] ):
'''simple docstring'''
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case :Union[str, Any] , **__snake_case :int ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
__magic_name__ : List[str] =fixed_enter
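# Usage sketch (path illustrative) using fsspec's URL-chaining syntax from the
# class comment above:
#   with fsspec.open("gzip://file.txt::file://./file.txt.gz", mode="rb") as f:
#       payload = f.read()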
| 21 |
from typing import Any
def __A ( _A ):
"""simple docstring"""
if not input_list:
return []
    __a = [input_list.count(value) for value in input_list]
__a = max(_A ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(_A ) if value == y} )
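    # Illustrative run (input assumed): [2, 2, 3, 3, 4] gives counts
    # [2, 2, 2, 2, 1]; the maximum count is 2, so both modes are returned
    # in sorted order: [2, 3].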
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCamelCase : List[str] = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _lowerCAmelCase ( __a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
_UpperCamelCase :str =np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_UpperCamelCase :Any =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_UpperCamelCase :Any =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase :Tuple =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCamelCase :List[str] =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=99 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=0.02 , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :Dict =parent
_UpperCamelCase :Union[str, Any] =batch_size
_UpperCamelCase :Optional[int] =seq_length
_UpperCamelCase :Union[str, Any] =is_training
_UpperCamelCase :Union[str, Any] =use_labels
_UpperCamelCase :List[Any] =vocab_size
_UpperCamelCase :Optional[Any] =hidden_size
_UpperCamelCase :Tuple =num_hidden_layers
_UpperCamelCase :Optional[Any] =num_attention_heads
_UpperCamelCase :List[str] =intermediate_size
_UpperCamelCase :Optional[Any] =hidden_act
_UpperCamelCase :int =hidden_dropout_prob
_UpperCamelCase :List[Any] =attention_probs_dropout_prob
_UpperCamelCase :Union[str, Any] =max_position_embeddings
_UpperCamelCase :Optional[int] =eos_token_id
_UpperCamelCase :List[Any] =pad_token_id
_UpperCamelCase :str =bos_token_id
_UpperCamelCase :List[Any] =initializer_range
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Any =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        _UpperCamelCase :Union[str, Any] =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
_UpperCamelCase :Dict =shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
_UpperCamelCase :str =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , )
_UpperCamelCase :Tuple =prepare_blenderbot_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Optional[int] =self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :int =20
_UpperCamelCase :Optional[int] =model_class_name(lowerCAmelCase__ )
_UpperCamelCase :Optional[int] =model.encode(inputs_dict["""input_ids"""] )
_UpperCamelCase :Tuple =(
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCamelCase :Any =model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase :Optional[int] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCamelCase :List[str] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase :Union[str, Any] =model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase :str =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCamelCase :Optional[int] =model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase :int =model.decode(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase :int =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :Any =20
_UpperCamelCase :Union[str, Any] =model_class_name(lowerCAmelCase__ )
_UpperCamelCase :Optional[int] =model.encode(inputs_dict["""input_ids"""] )
_UpperCamelCase :Optional[Any] =(
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCamelCase :str =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCamelCase :Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase :Optional[int] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase :Optional[Any] =model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase :Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCamelCase :Optional[int] =model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase :int =model.decode(lowerCAmelCase__ , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ )
_UpperCamelCase :Any =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
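    # Both cache checks above decode the same sequence with and without the
    # init_cache fast path and require the last-step logits to agree within 1e-3.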
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
__UpperCAmelCase = 99
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :Any =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
_UpperCamelCase :List[str] =input_ids.shape[0]
_UpperCamelCase :List[str] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Optional[int] =self._get_config_and_data()
_UpperCamelCase :List[Any] =FlaxBlenderbotForConditionalGeneration(lowerCAmelCase__ )
_UpperCamelCase :int =lm_model(input_ids=lowerCAmelCase__ )
_UpperCamelCase :str =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase__ )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase :Optional[int] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_UpperCamelCase :Dict =FlaxBlenderbotForConditionalGeneration(lowerCAmelCase__ )
        _UpperCamelCase :Tuple =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        _UpperCamelCase :int =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
_UpperCamelCase :str =lm_model(input_ids=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
_UpperCamelCase :Dict =(*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase__ )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
        _UpperCamelCase :Optional[Any] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
_UpperCamelCase :Union[str, Any] =shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
        _UpperCamelCase :List[Any] =np.equal(lowerCAmelCase__ , 1 ).astype(np.float32 ).sum()
        _UpperCamelCase :Optional[Any] =np.equal(lowerCAmelCase__ , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCAmelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
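        # i.e. shift_tokens_right writes the decoder start token (2) into column 0
        # and consumes exactly one pad token per row, as the assertions verify.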
@require_flax
class lowerCamelCase__ ( __snake_case , unittest.TestCase , __snake_case ):
__UpperCAmelCase = True
__UpperCAmelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCAmelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =FlaxBlenderbotModelTester(self )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase :Optional[Any] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase :Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase :int =self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase :int =model_class(lowerCAmelCase__ )
@jax.jit
def encode_jitted(lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
return model.encode(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
with self.subTest("""JIT Enabled""" ):
_UpperCamelCase :Dict =encode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCamelCase :str =encode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase :str =model_class(lowerCAmelCase__ )
_UpperCamelCase :Tuple =model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCamelCase :Tuple ={
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return model.decode(
decoder_input_ids=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , encoder_outputs=lowerCAmelCase__ , )
with self.subTest("""JIT Enabled""" ):
_UpperCamelCase :Any =decode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCamelCase :Optional[int] =decode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCamelCase :Dict =model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_UpperCamelCase :Union[str, Any] =np.ones((1, 1) ) * model.config.eos_token_id
_UpperCamelCase :Optional[Any] =model(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase :Tuple ={"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
_UpperCamelCase :Tuple ={"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
_UpperCamelCase :Optional[int] =FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase__ )
_UpperCamelCase :List[str] =BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
_UpperCamelCase :List[Any] =["""Sam"""]
_UpperCamelCase :Dict =tokenizer(lowerCAmelCase__ , return_tensors="""jax""" )
_UpperCamelCase :List[str] =model.generate(**lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase :Tuple ="""Sam is a great name. It means \"sun\" in Gaelic."""
_UpperCamelCase :Union[str, Any] =tokenizer.batch_decode(lowerCAmelCase__ , **lowerCAmelCase__ )
        assert generated_txt[0].strip() == tgt_text
| 710 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_lowerCamelCase : str = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class lowerCamelCase__ ( tr.AbstractTransform ):
def __init__( self , lowerCAmelCase__ = " " ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Dict =sentence_delimiter
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> Dict:
"""simple docstring"""
return list(lowerCAmelCase__ )
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :int =[]
for sent_idx, sentence in enumerate(lowerCAmelCase__ ):
chars.extend(self.process_string(lowerCAmelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
_lowerCamelCase : Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
_lowerCamelCase : str = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_lowerCamelCase : int = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_lowerCamelCase : Tuple = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_lowerCamelCase : Optional[int] = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[int]:
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )["wer"]
_UpperCamelCase :str =0
_UpperCamelCase :Tuple =0
for prediction, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase :Optional[int] =jiwer.compute_measures(
lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
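        # CER = (S + D + I) / (S + D + C): summed edit operations over total
        # reference characters; the docstring example works out to 14 / 41 ~= 0.3415.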
        return incorrect / total
| 512 | 0 |
def __magic_name__ ( ):
'''simple docstring'''
return 1
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2) + one_pence()
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5) + two_pence(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10) + five_pence(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100) + fifty_pence(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200) + one_pound(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = 200):
'''simple docstring'''
return two_pound(lowerCAmelCase_)
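# The chain above counts each coin combination exactly once by only ever adding
# coins in non-increasing order, but it re-solves the same subproblems many
# times. A memoised variant is sketched below for comparison; the helper name
# and the use of functools are illustrative, not part of the original solution.
from functools import lru_cache

@lru_cache(maxsize=None)
def count_ways(x, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    """Count coin combinations for x pence in O(x * len(coins)) time."""
    if x == 0:
        return 1
    if x < 0 or not coins:
        return 0
    # Either spend the largest remaining coin again, or retire it for good.
    return count_ways(x - coins[-1], coins) + count_ways(x, coins[:-1])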
if __name__ == "__main__":
print(solution(int(input().strip())))
| 250 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
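# _LazyModule defers the heavy torch/vision imports declared above until an
# attribute of this package is actually accessed.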
| 250 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCamelCase( unittest.TestCase ):
def a__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def a__ ( self : Optional[Any] ):
_UpperCAmelCase ,_UpperCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_UpperCAmelCase : str = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Tuple = jax.device_count()
_UpperCAmelCase : Any = num_samples * [prompt]
_UpperCAmelCase : Any = sd_pipe.prepare_inputs(_lowerCamelCase )
_UpperCAmelCase : Union[str, Any] = replicate(_lowerCamelCase )
_UpperCAmelCase : Any = shard(_lowerCamelCase )
_UpperCAmelCase : Tuple = jax.random.PRNGKey(0 )
_UpperCAmelCase : Any = jax.random.split(_lowerCamelCase , jax.device_count() )
_UpperCAmelCase : Any = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=25 , jit=_lowerCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
_UpperCAmelCase : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCAmelCase : int = images[0, 2_53:2_56, 2_53:2_56, -1]
_UpperCAmelCase : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCAmelCase : Optional[Any] = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def a__ ( self : Tuple ):
_UpperCAmelCase : Optional[Any] = "stabilityai/stable-diffusion-2"
_UpperCAmelCase ,_UpperCAmelCase : int = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCamelCase , subfolder="scheduler" )
_UpperCAmelCase ,_UpperCAmelCase : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            _lowerCamelCase , scheduler=_lowerCamelCase , revision="bf16" , dtype=jnp.bfloat16 , )
_UpperCAmelCase : List[Any] = scheduler_params
_UpperCAmelCase : str = "A painting of a squirrel eating a burger"
_UpperCAmelCase : int = jax.device_count()
_UpperCAmelCase : Optional[Any] = num_samples * [prompt]
_UpperCAmelCase : Union[str, Any] = sd_pipe.prepare_inputs(_lowerCamelCase )
_UpperCAmelCase : int = replicate(_lowerCamelCase )
_UpperCAmelCase : Dict = shard(_lowerCamelCase )
_UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
_UpperCAmelCase : Optional[int] = jax.random.split(_lowerCamelCase , jax.device_count() )
_UpperCAmelCase : int = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=25 , jit=_lowerCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
_UpperCAmelCase : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCAmelCase : int = images[0, 2_53:2_56, 2_53:2_56, -1]
_UpperCAmelCase : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCAmelCase : List[str] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
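        # Both tests run the pipeline in bfloat16 (revision "bf16"), so the
        # expected slices are compared with a loose 1e-2 absolute tolerance.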
| 328 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class _UpperCamelCase( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , *_lowerCamelCase : Any , **_lowerCamelCase : Any ):
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
| 328 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def __magic_name__ ( ) -> int:
_lowercase : Dict = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowercase : Union[str, Any] = get_sagemaker_input()
else:
_lowercase : str = get_cluster_input()
return config
def __magic_name__ ( SCREAMING_SNAKE_CASE=None ) -> List[Any]:
if subparsers is not None:
_lowercase : Union[str, Any] = subparsers.add_parser('config' , description=SCREAMING_SNAKE_CASE )
else:
_lowercase : List[str] = argparse.ArgumentParser('Accelerate config command' , description=SCREAMING_SNAKE_CASE )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : List[str] = get_user_input()
if args.config_file is not None:
_lowercase : Optional[Any] = args.config_file
else:
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
os.makedirs(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(SCREAMING_SNAKE_CASE )
print(F"""accelerate configuration saved at {config_file}""" )
def __magic_name__ ( ) -> Optional[int]:
_lowercase : Union[str, Any] = config_command_parser()
_lowercase : Any = parser.parse_args()
config_command(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
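# Illustrative usage (assumes the `accelerate` package is installed; the same
# parser is normally reached through the `accelerate config` CLI entry point):
#
#   $ accelerate config --config_file my_config.yaml
#
# or programmatically, bypassing the CLI:
#
#   parser = config_command_parser()
#   config_command(parser.parse_args(["--config_file", "my_config.yaml"]))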
| 66 |
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet).
GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
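# Illustrative invocation (the checkpoint and file names are placeholders, not
# part of this script; all flags shown are defined by the dataclasses above or
# by TrainingArguments):
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file train.txt \
#       --do_train \
#       --output_dir ./lm-output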
| 374 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence
        # classification and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is used, so OCR should not be applied
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
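# Illustrative usage (configuration objects only; no model weights are loaded,
# and the task string is one of the branches handled by `inputs` above):
#
#   config = LayoutLMv3Config(hidden_size=768, num_hidden_layers=12)
#   onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#   print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask', 'bbox', 'pixel_values']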
| 721 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
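# Illustrative usage (a hypothetical snippet; assumes Pillow is available and
# uses a synthetic image, so the exact pixel values are irrelevant):
#
#   import numpy as np
#   from PIL import Image
#
#   image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
#   processor = CLIPImageProcessor()
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults above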
| 76 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a : Any = '''__DUMMY_TRANSFORMERS_USER__'''
a : Optional[Any] = '''Dummy User'''
a : List[str] = '''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'''
a : Union[str, Any] = '''https://hub-ci.huggingface.co'''
a : Optional[Any] = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
a : Dict = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
a : Tuple = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''', lowerCamelCase__ )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''', lowerCamelCase__ )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''', lowerCamelCase__ )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''', lowerCamelCase__ )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(lowerCamelCase__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=lowerCamelCase__ )
@pytest.fixture(scope='''session''' )
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = HfFolder.get_token()
HfFolder.save_token(lowerCamelCase__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCamelCase__ )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(lowerCamelCase__ )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = F"repo_txt_data-{int(time.time() * 10e3 )}"
snake_case_ = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''', private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__, path_or_fileobj=str(lowerCamelCase__ ), path_in_repo='''data/text_data.txt''', repo_id=lowerCamelCase__, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = F"repo_zipped_txt_data-{int(time.time() * 10e3 )}"
snake_case_ = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''', private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__, path_or_fileobj=str(lowerCamelCase__ ), path_in_repo='''data.zip''', repo_id=lowerCamelCase__, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> str:
'''simple docstring'''
snake_case_ = F"repo_zipped_img_data-{int(time.time() * 10e3 )}"
snake_case_ = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''', private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__, path_or_fileobj=str(lowerCamelCase__ ), path_in_repo='''data.zip''', repo_id=lowerCamelCase__, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
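# Illustrative test using the fixtures above (a hypothetical sketch; pytest
# injects fixtures by parameter name, and the repo name is a placeholder):
#
#   def test_upload_roundtrip(hf_api, hf_token, temporary_repo):
#       with temporary_repo(f"{CI_HUB_USER}/my-test-repo") as repo_id:
#           hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")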
| 640 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 667 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
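# To run only this module (illustrative; the path mirrors the transformers
# repository layout and is an assumption here):
#
#   $ pytest tests/models/clip/test_processor_clip.py -q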
| 713 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Return the binary representation of `number` logically shifted left
    `shift_amount` times, i.e. (number << shift_amount)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Return the binary representation of `number` logically shifted right
    `shift_amount` times, i.e. (number >>> shift_amount)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Return the two's complement binary representation of `number`
    arithmetically shifted right `shift_amount` times, i.e. (number >> shift_amount)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
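# Quick sanity check of the three shifts (outputs verified by hand):
#
#   print(logical_left_shift(0b1101, 2))       # 0b110100
#   print(logical_right_shift(0b1101, 2))      # 0b11
#   print(arithmetic_right_shift(-0b1101, 2))  # 0b11100  (-13 >> 2 == -4 in 5-bit two's complement)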
| 400 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase : Tuple = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
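# Illustrative usage of the public API re-exported above (assumes network
# access; "imdb" is just a well-known public dataset):
#
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train")
#   print(ds[0]["text"][:80])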
| 70 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 34 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = BertTokenizer
A__ : List[Any] = BertTokenizerFast
A__ : Tuple = True
A__ : List[str] = True
A__ : List[str] = filter_non_english
def snake_case__ ( self ) -> Optional[Any]:
super().setUp()
A__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
A__ = "UNwant\u00E9d,running"
A__ = "unwanted, running"
return input_text, output_text
def snake_case__ ( self ) -> List[Any]:
A__ = self.tokenizer_class(self.vocab_file )
A__ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__UpperCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [9, 6, 7, 12, 10, 11] )
def snake_case__ ( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = "UNwant\u00E9d,running"
A__ = tokenizer.tokenize(__UpperCamelCase )
A__ = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
A__ = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
A__ = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(__UpperCamelCase )
A__ = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# With lower casing
A__ = self.get_tokenizer(do_lower_case=__UpperCamelCase )
A__ = self.get_rust_tokenizer(do_lower_case=__UpperCamelCase )
A__ = "UNwant\u00E9d,running"
A__ = tokenizer.tokenize(__UpperCamelCase )
A__ = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
A__ = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
A__ = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(__UpperCamelCase )
A__ = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def snake_case__ ( self ) -> List[str]:
A__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def snake_case__ ( self ) -> str:
A__ = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def snake_case__ ( self ) -> Optional[int]:
A__ = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def snake_case__ ( self ) -> int:
A__ = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()

        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
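    # BasicTokenizer splits on every punctuation character, including apostrophes
    # inside contractions, which is why "can't" comes out as ["can", "'", "t"].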
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 101 and 102 are the [CLS] and [SEP] ids in the bert-base-uncased vocab
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
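    # The offset mapping gives (start, end) character positions in the original
    # string for each token; special tokens such as [CLS] and [SEP] do not come
    # from the input text and are reported as (0, 0).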
    def test_change_tokenize_chinese_chars(self):
        list_of_common_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_common_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output. Output of the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """
    A 1D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output.
    """
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time: Gaussian Fourier features or standard sinusoidal position embeddings
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            # broadcast the raw projection over the length dimension of the sample
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
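# A minimal usage sketch (illustrative only, not part of the original module;
# the shapes assume the default Fourier time embedding and the default
# (32, 32, 64) block configuration above):
#
#     model = UNet1DModel()
#     sample = torch.randn(1, 2, 65536)      # (batch, channels, length)
#     timestep = torch.tensor([10])          # one diffusion timestep per batch item
#     out = model(sample, timestep).sample   # output keeps the input's layout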